clang 23.0.0git
CodeGenFunction.cpp
Go to the documentation of this file.
1//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This coordinates the per-function state used while generating code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CodeGenFunction.h"
14#include "CGBlocks.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CodeGenModule.h"
22#include "CodeGenPGO.h"
23#include "TargetInfo.h"
25#include "clang/AST/ASTLambda.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/Decl.h"
28#include "clang/AST/DeclCXX.h"
29#include "clang/AST/Expr.h"
31#include "clang/AST/StmtCXX.h"
32#include "clang/AST/StmtObjC.h"
39#include "llvm/ADT/ArrayRef.h"
40#include "llvm/ADT/ScopeExit.h"
41#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/FPEnv.h"
45#include "llvm/IR/Instruction.h"
46#include "llvm/IR/IntrinsicInst.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/IntrinsicsPowerPC.h"
49#include "llvm/IR/MDBuilder.h"
50#include "llvm/Support/CRC.h"
51#include "llvm/Support/SipHash.h"
52#include "llvm/Support/xxhash.h"
53#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
54#include "llvm/Transforms/Utils/PromoteMemToReg.h"
55#include <optional>
56
57using namespace clang;
58using namespace CodeGen;
59
60/// shouldEmitLifetimeMarkers - Decide whether we need emit the life-time
61/// markers.
62static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
63 const LangOptions &LangOpts) {
64 if (CGOpts.DisableLifetimeMarkers)
65 return false;
66
67 // Sanitizers may use markers.
68 if (CGOpts.SanitizeAddressUseAfterScope ||
69 LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
70 LangOpts.Sanitize.has(SanitizerKind::Memory) ||
71 LangOpts.Sanitize.has(SanitizerKind::MemtagStack))
72 return true;
73
74 // For now, only in optimized builds.
75 return CGOpts.OptimizationLevel != 0;
76}
77
78CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
79 : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
80 Builder(cgm, cgm.getModule().getContext(), CGBuilderInserterTy(this)),
82 DebugInfo(CGM.getModuleDebugInfo()),
83 PGO(std::make_unique<CodeGenPGO>(cgm)),
84 ShouldEmitLifetimeMarkers(
85 shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
86 if (!suppressNewContext)
87 CGM.getCXXABI().getMangleContext().startNewFunction();
88 EHStack.setCGF(this);
89
91}
92
94 assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
95 assert(DeferredDeactivationCleanupStack.empty() &&
96 "missed to deactivate a cleanup");
97
98 if (getLangOpts().OpenMP && CurFn)
99 CGM.getOpenMPRuntime().functionFinished(*this);
100
101 // If we have an OpenMPIRBuilder we want to finalize functions (incl.
102 // outlining etc) at some point. Doing it once the function codegen is done
103 // seems to be a reasonable spot. We do it here, as opposed to the deletion
104 // time of the CodeGenModule, because we have to ensure the IR has not yet
105 // been "emitted" to the outside, thus, modifications are still sensible.
106 if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
107 CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
108}
109
110// Map the LangOption for exception behavior into
111// the corresponding enum in the IR.
112llvm::fp::ExceptionBehavior
114
115 switch (Kind) {
116 case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
117 case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
118 case LangOptions::FPE_Strict: return llvm::fp::ebStrict;
119 default:
120 llvm_unreachable("Unsupported FP Exception Behavior");
121 }
122}
123
125 llvm::FastMathFlags FMF;
126 FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
127 FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
128 FMF.setNoInfs(FPFeatures.getNoHonorInfs());
129 FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
130 FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
131 FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
132 FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
133 Builder.setFastMathFlags(FMF);
134}
135
137 const Expr *E)
138 : CGF(CGF) {
139 ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
140}
141
143 FPOptions FPFeatures)
144 : CGF(CGF) {
145 ConstructorHelper(FPFeatures);
146}
147
148void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
149 OldFPFeatures = CGF.CurFPFeatures;
150 CGF.CurFPFeatures = FPFeatures;
151
152 OldExcept = CGF.Builder.getDefaultConstrainedExcept();
153 OldRounding = CGF.Builder.getDefaultConstrainedRounding();
154
155 if (OldFPFeatures == FPFeatures)
156 return;
157
158 FMFGuard.emplace(CGF.Builder);
159
160 llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
161 CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
162 auto NewExceptionBehavior =
164 CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
165
166 CGF.SetFastMathFlags(FPFeatures);
167
168 assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
169 isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
170 isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
171 (NewExceptionBehavior == llvm::fp::ebIgnore &&
172 NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
173 "FPConstrained should be enabled on entire function");
174
175 auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
176 auto OldValue =
177 CGF.CurFn->getFnAttribute(Name).getValueAsBool();
178 auto NewValue = OldValue & Value;
179 if (OldValue != NewValue)
180 CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
181 };
182 mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
183}
184
186 CGF.CurFPFeatures = OldFPFeatures;
187 CGF.Builder.setDefaultConstrainedExcept(OldExcept);
188 CGF.Builder.setDefaultConstrainedRounding(OldRounding);
189}
190
191static LValue
192makeNaturalAlignAddrLValue(llvm::Value *V, QualType T, bool ForPointeeType,
193 bool MightBeSigned, CodeGenFunction &CGF,
194 KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
195 LValueBaseInfo BaseInfo;
196 TBAAAccessInfo TBAAInfo;
197 CharUnits Alignment =
198 CGF.CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, ForPointeeType);
199 Address Addr =
200 MightBeSigned
201 ? CGF.makeNaturalAddressForPointer(V, T, Alignment, false, nullptr,
202 nullptr, IsKnownNonNull)
203 : Address(V, CGF.ConvertTypeForMem(T), Alignment, IsKnownNonNull);
204 return CGF.MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
205}
206
207LValue
209 KnownNonNull_t IsKnownNonNull) {
210 return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
211 /*MightBeSigned*/ true, *this,
212 IsKnownNonNull);
213}
214
215LValue
217 return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
218 /*MightBeSigned*/ true, *this);
219}
220
222 QualType T) {
223 return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
224 /*MightBeSigned*/ false, *this);
225}
226
228 QualType T) {
229 return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
230 /*MightBeSigned*/ false, *this);
231}
232
234 return CGM.getTypes().ConvertTypeForMem(T);
235}
236
238 return CGM.getTypes().ConvertType(T);
239}
240
242 llvm::Type *LLVMTy) {
243 return CGM.getTypes().convertTypeForLoadStore(ASTTy, LLVMTy);
244}
245
247 type = type.getCanonicalType();
248 while (true) {
249 switch (type->getTypeClass()) {
250#define TYPE(name, parent)
251#define ABSTRACT_TYPE(name, parent)
252#define NON_CANONICAL_TYPE(name, parent) case Type::name:
253#define DEPENDENT_TYPE(name, parent) case Type::name:
254#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
255#include "clang/AST/TypeNodes.inc"
256 llvm_unreachable("non-canonical or dependent type in IR-generation");
257
258 case Type::Auto:
259 case Type::DeducedTemplateSpecialization:
260 llvm_unreachable("undeduced type in IR-generation");
261
262 // Various scalar types.
263 case Type::Builtin:
264 case Type::Pointer:
265 case Type::BlockPointer:
266 case Type::LValueReference:
267 case Type::RValueReference:
268 case Type::MemberPointer:
269 case Type::Vector:
270 case Type::ExtVector:
271 case Type::ConstantMatrix:
272 case Type::FunctionProto:
273 case Type::FunctionNoProto:
274 case Type::Enum:
275 case Type::ObjCObjectPointer:
276 case Type::Pipe:
277 case Type::BitInt:
278 case Type::HLSLAttributedResource:
279 case Type::HLSLInlineSpirv:
280 case Type::OverflowBehavior:
281 return TEK_Scalar;
282
283 // Complexes.
284 case Type::Complex:
285 return TEK_Complex;
286
287 // Arrays, records, and Objective-C objects.
288 case Type::ConstantArray:
289 case Type::IncompleteArray:
290 case Type::VariableArray:
291 case Type::Record:
292 case Type::ObjCObject:
293 case Type::ObjCInterface:
294 case Type::ArrayParameter:
295 return TEK_Aggregate;
296
297 // We operate on atomic values according to their underlying type.
298 case Type::Atomic:
299 type = cast<AtomicType>(type)->getValueType();
300 continue;
301 }
302 llvm_unreachable("unknown type kind!");
303 }
304}
305
307 // For cleanliness, we try to avoid emitting the return block for
308 // simple cases.
309 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
310
311 if (CurBB) {
312 assert(!CurBB->hasTerminator() && "Unexpected terminated block.");
313
314 // We have a valid insert point, reuse it if it is empty or there are no
315 // explicit jumps to the return block.
316 if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
317 ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
318 delete ReturnBlock.getBlock();
320 } else
321 EmitBlock(ReturnBlock.getBlock());
322 return llvm::DebugLoc();
323 }
324
325 // Otherwise, if the return block is the target of a single direct
326 // branch then we can just put the code in that block instead. This
327 // cleans up functions which started with a unified return block.
328 if (ReturnBlock.getBlock()->hasOneUse()) {
329 auto *BI =
330 dyn_cast<llvm::UncondBrInst>(*ReturnBlock.getBlock()->user_begin());
331 if (BI && BI->getSuccessor(0) == ReturnBlock.getBlock()) {
332 // Record/return the DebugLoc of the simple 'return' expression to be used
333 // later by the actual 'ret' instruction.
334 llvm::DebugLoc Loc = BI->getDebugLoc();
335 Builder.SetInsertPoint(BI->getParent());
336 BI->eraseFromParent();
337 delete ReturnBlock.getBlock();
339 return Loc;
340 }
341 }
342
343 // FIXME: We are at an unreachable point, there is no reason to emit the block
344 // unless it has uses. However, we still need a place to put the debug
345 // region.end for now.
346
347 EmitBlock(ReturnBlock.getBlock());
348 return llvm::DebugLoc();
349}
350
351static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
352 if (!BB) return;
353 if (!BB->use_empty()) {
354 CGF.CurFn->insert(CGF.CurFn->end(), BB);
355 return;
356 }
357 delete BB;
358}
359
361 assert(BreakContinueStack.empty() &&
362 "mismatched push/pop in break/continue stack!");
363 assert(LifetimeExtendedCleanupStack.empty() &&
364 "mismatched push/pop of cleanups in EHStack!");
365 assert(DeferredDeactivationCleanupStack.empty() &&
366 "mismatched activate/deactivate of cleanups!");
367
368 if (CGM.shouldEmitConvergenceTokens()) {
369 ConvergenceTokenStack.pop_back();
370 assert(ConvergenceTokenStack.empty() &&
371 "mismatched push/pop in convergence stack!");
372 }
373
374 bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
375 && NumSimpleReturnExprs == NumReturnExprs
376 && ReturnBlock.getBlock()->use_empty();
377 // Usually the return expression is evaluated before the cleanup
378 // code. If the function contains only a simple return statement,
379 // such as a constant, the location before the cleanup code becomes
380 // the last useful breakpoint in the function, because the simple
381 // return expression will be evaluated after the cleanup code. To be
382 // safe, set the debug location for cleanup code to the location of
383 // the return statement. Otherwise the cleanup code should be at the
384 // end of the function's lexical scope.
385 //
386 // If there are multiple branches to the return block, the branch
387 // instructions will get the location of the return statements and
388 // all will be fine.
389 if (CGDebugInfo *DI = getDebugInfo()) {
390 if (OnlySimpleReturnStmts)
391 DI->EmitLocation(Builder, LastStopPoint);
392 else
393 DI->EmitLocation(Builder, EndLoc);
394 }
395
396 // Pop any cleanups that might have been associated with the
397 // parameters. Do this in whatever block we're currently in; it's
398 // important to do this before we enter the return block or return
399 // edges will be *really* confused.
400 bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
401 bool HasOnlyNoopCleanups =
402 HasCleanups && EHStack.containsOnlyNoopCleanups(PrologueCleanupDepth);
403 bool EmitRetDbgLoc = !HasCleanups || HasOnlyNoopCleanups;
404
405 std::optional<ApplyDebugLocation> OAL;
406 if (HasCleanups) {
407 // Make sure the line table doesn't jump back into the body for
408 // the ret after it's been at EndLoc.
409 if (CGDebugInfo *DI = getDebugInfo()) {
410 if (OnlySimpleReturnStmts)
411 DI->EmitLocation(Builder, EndLoc);
412 else
413 // We may not have a valid end location. Try to apply it anyway, and
414 // fall back to an artificial location if needed.
416 }
417
419 }
420
421 // Emit function epilog (to return).
422 llvm::DebugLoc Loc = EmitReturnBlock();
423
425 if (CGM.getCodeGenOpts().InstrumentFunctions)
426 CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
427 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
428 CurFn->addFnAttr("instrument-function-exit-inlined",
429 "__cyg_profile_func_exit");
430 }
431
432 // Emit debug descriptor for function end.
433 if (CGDebugInfo *DI = getDebugInfo())
434 DI->EmitFunctionEnd(Builder, CurFn);
435
436 // Reset the debug location to that of the simple 'return' expression, if any
437 // rather than that of the end of the function's scope '}'.
438 uint64_t RetKeyInstructionsAtomGroup = Loc ? Loc->getAtomGroup() : 0;
439 ApplyDebugLocation AL(*this, Loc);
440 EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc,
441 RetKeyInstructionsAtomGroup);
443
444 assert(EHStack.empty() &&
445 "did not remove all scopes from cleanup stack!");
446
447 // If someone did an indirect goto, emit the indirect goto block at the end of
448 // the function.
449 if (IndirectBranch) {
450 EmitBlock(IndirectBranch->getParent());
451 Builder.ClearInsertionPoint();
452 }
453
454 // If some of our locals escaped, insert a call to llvm.localescape in the
455 // entry block.
456 if (!EscapedLocals.empty()) {
457 // Invert the map from local to index into a simple vector. There should be
458 // no holes.
460 EscapeArgs.resize(EscapedLocals.size());
461 for (auto &Pair : EscapedLocals)
462 EscapeArgs[Pair.second] = Pair.first;
463 llvm::Function *FrameEscapeFn = llvm::Intrinsic::getOrInsertDeclaration(
464 &CGM.getModule(), llvm::Intrinsic::localescape);
465 CGBuilderTy(CGM, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
466 }
467
468 // Remove the AllocaInsertPt instruction, which is just a convenience for us.
469 llvm::Instruction *Ptr = AllocaInsertPt;
470 AllocaInsertPt = nullptr;
471 Ptr->eraseFromParent();
472
473 // PostAllocaInsertPt, if created, was lazily created when it was required,
474 // remove it now since it was just created for our own convenience.
475 if (PostAllocaInsertPt) {
476 llvm::Instruction *PostPtr = PostAllocaInsertPt;
477 PostAllocaInsertPt = nullptr;
478 PostPtr->eraseFromParent();
479 }
480
481 // If someone took the address of a label but never did an indirect goto, we
482 // made a zero entry PHI node, which is illegal, zap it now.
483 if (IndirectBranch) {
484 llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
485 if (PN->getNumIncomingValues() == 0) {
486 PN->replaceAllUsesWith(llvm::PoisonValue::get(PN->getType()));
487 PN->eraseFromParent();
488 }
489 }
490
492 EmitIfUsed(*this, TerminateLandingPad);
493 EmitIfUsed(*this, TerminateHandler);
494 EmitIfUsed(*this, UnreachableBlock);
495
496 for (const auto &FuncletAndParent : TerminateFunclets)
497 EmitIfUsed(*this, FuncletAndParent.second);
498
499 if (CGM.getCodeGenOpts().EmitDeclMetadata)
500 EmitDeclMetadata();
501
502 for (const auto &R : DeferredReplacements) {
503 if (llvm::Value *Old = R.first) {
504 Old->replaceAllUsesWith(R.second);
505 cast<llvm::Instruction>(Old)->eraseFromParent();
506 }
507 }
508 DeferredReplacements.clear();
509
510 // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
511 // PHIs if the current function is a coroutine. We don't do it for all
512 // functions as it may result in slight increase in numbers of instructions
513 // if compiled with no optimizations. We do it for coroutine as the lifetime
514 // of CleanupDestSlot alloca make correct coroutine frame building very
515 // difficult.
516 if (NormalCleanupDest.isValid() && isCoroutine()) {
517 llvm::DominatorTree DT(*CurFn);
518 llvm::PromoteMemToReg(
519 cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
521 }
522
523 // Scan function arguments for vector width.
524 for (llvm::Argument &A : CurFn->args())
525 if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
526 LargestVectorWidth =
527 std::max((uint64_t)LargestVectorWidth,
528 VT->getPrimitiveSizeInBits().getKnownMinValue());
529
530 // Update vector width based on return type.
531 if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
532 LargestVectorWidth =
533 std::max((uint64_t)LargestVectorWidth,
534 VT->getPrimitiveSizeInBits().getKnownMinValue());
535
536 if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
537 LargestVectorWidth = CurFnInfo->getMaxVectorWidth();
538
539 // Add the min-legal-vector-width attribute. This contains the max width from:
540 // 1. min-vector-width attribute used in the source program.
541 // 2. Any builtins used that have a vector width specified.
542 // 3. Values passed in and out of inline assembly.
543 // 4. Width of vector arguments and return types for this function.
544 // 5. Width of vector arguments and return types for functions called by this
545 // function.
546 if (getContext().getTargetInfo().getTriple().isX86())
547 CurFn->addFnAttr("min-legal-vector-width",
548 llvm::utostr(LargestVectorWidth));
549
550 // If we generated an unreachable return block, delete it now.
551 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
552 Builder.ClearInsertionPoint();
553 ReturnBlock.getBlock()->eraseFromParent();
554 }
555 if (ReturnValue.isValid()) {
556 auto *RetAlloca =
557 dyn_cast<llvm::AllocaInst>(ReturnValue.emitRawPointer(*this));
558 if (RetAlloca && RetAlloca->use_empty()) {
559 RetAlloca->eraseFromParent();
561 }
562 }
563}
564
565/// ShouldInstrumentFunction - Return true if the current function should be
566/// instrumented with __cyg_profile_func_* calls
568 if (!CGM.getCodeGenOpts().InstrumentFunctions &&
569 !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
570 !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
571 return false;
572 if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
573 return false;
574 return true;
575}
576
578 if (!CurFuncDecl)
579 return false;
580 return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
581}
582
583/// ShouldXRayInstrument - Return true if the current function should be
584/// instrumented with XRay nop sleds.
586 return CGM.getCodeGenOpts().XRayInstrumentFunctions;
587}
588
589/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
590/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
592 return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
593 (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
594 CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
596}
597
599 return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
600 (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
601 CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
603}
604
605llvm::ConstantInt *
607 // Remove any (C++17) exception specifications, to allow calling e.g. a
608 // noexcept function through a non-noexcept pointer.
609 if (!Ty->isFunctionNoProtoType())
611 std::string Mangled;
612 llvm::raw_string_ostream Out(Mangled);
613 CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out, false);
614 return llvm::ConstantInt::get(
615 CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
616}
617
618void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
619 llvm::Function *Fn) {
620 if (!FD->hasAttr<DeviceKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
621 return;
622
623 llvm::LLVMContext &Context = getLLVMContext();
624
625 CGM.GenKernelArgMetadata(Fn, FD, this);
626
627 if (!(getLangOpts().OpenCL ||
628 (getLangOpts().CUDA &&
629 getContext().getTargetInfo().getTriple().isSPIRV())))
630 return;
631
632 if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
633 QualType HintQTy = A->getTypeHint();
634 const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
635 bool IsSignedInteger =
636 HintQTy->isSignedIntegerType() ||
637 (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
638 llvm::Metadata *AttrMDArgs[] = {
639 llvm::ConstantAsMetadata::get(llvm::PoisonValue::get(
640 CGM.getTypes().ConvertType(A->getTypeHint()))),
641 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
642 llvm::IntegerType::get(Context, 32),
643 llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
644 Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
645 }
646
647 if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
648 auto Eval = [&](Expr *E) {
649 return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
650 };
651 llvm::Metadata *AttrMDArgs[] = {
652 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
653 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
654 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
655 Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
656 }
657
658 if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
659 auto Eval = [&](Expr *E) {
660 return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
661 };
662 llvm::Metadata *AttrMDArgs[] = {
663 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
664 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
665 llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
666 Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
667 }
668
669 if (const OpenCLIntelReqdSubGroupSizeAttr *A =
670 FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
671 llvm::Metadata *AttrMDArgs[] = {
672 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
673 Fn->setMetadata("intel_reqd_sub_group_size",
674 llvm::MDNode::get(Context, AttrMDArgs));
675 }
676}
677
678/// Determine whether the function F ends with a return stmt.
679static bool endsWithReturn(const Decl* F) {
680 const Stmt *Body = nullptr;
681 if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
682 Body = FD->getBody();
683 else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
684 Body = OMD->getBody();
685
686 if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
687 auto LastStmt = CS->body_rbegin();
688 if (LastStmt != CS->body_rend())
689 return isa<ReturnStmt>(*LastStmt);
690 }
691 return false;
692}
693
695 if (SanOpts.has(SanitizerKind::Thread)) {
696 Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
697 Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
698 }
699}
700
701/// Check if the return value of this function requires sanitization.
702bool CodeGenFunction::requiresReturnValueCheck() const {
703 return requiresReturnValueNullabilityCheck() ||
704 (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
705 CurCodeDecl->getAttr<ReturnsNonNullAttr>());
706}
707
708static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
709 auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
710 if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
711 !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
712 (MD->getNumParams() != 1 && MD->getNumParams() != 2))
713 return false;
714
715 if (!Ctx.hasSameType(MD->parameters()[0]->getType(), Ctx.getSizeType()))
716 return false;
717
718 if (MD->getNumParams() == 2) {
719 auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
720 if (!PT || !PT->isVoidPointerType() ||
721 !PT->getPointeeType().isConstQualified())
722 return false;
723 }
724
725 return true;
726}
727
728bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
729 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
730 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
731}
732
733bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
734 return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
736 llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
737 return isInAllocaArgument(CGM.getCXXABI(), P->getType());
738 });
739}
740
741/// Return the UBSan prologue signature for \p FD if one is available.
742static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
743 const FunctionDecl *FD) {
744 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
745 if (!MD->isStatic())
746 return nullptr;
748}
749
751 llvm::Function *Fn,
752 const CGFunctionInfo &FnInfo,
753 const FunctionArgList &Args,
754 SourceLocation Loc,
755 SourceLocation StartLoc) {
756 assert(!CurFn &&
757 "Do not use a CodeGenFunction object for more than one function");
758
759 const Decl *D = GD.getDecl();
760
761 DidCallStackSave = false;
762 CurCodeDecl = D;
763 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
764 if (FD && FD->usesSEHTry())
765 CurSEHParent = GD;
766 CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
767 FnRetTy = RetTy;
768 CurFn = Fn;
769 CurFnInfo = &FnInfo;
770 assert(CurFn->isDeclaration() && "Function already has body?");
771
772 // If this function is ignored for any of the enabled sanitizers,
773 // disable the sanitizer for the function.
774 do {
775#define SANITIZER(NAME, ID) \
776 if (SanOpts.empty()) \
777 break; \
778 if (SanOpts.has(SanitizerKind::ID)) \
779 if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc)) \
780 SanOpts.set(SanitizerKind::ID, false);
781
782#include "clang/Basic/Sanitizers.def"
783#undef SANITIZER
784 } while (false);
785
786 if (D) {
787 const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
788 SanitizerMask no_sanitize_mask;
789 bool NoSanitizeCoverage = false;
790
791 for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
792 no_sanitize_mask |= Attr->getMask();
793 // SanitizeCoverage is not handled by SanOpts.
794 if (Attr->hasCoverage())
795 NoSanitizeCoverage = true;
796 }
797
798 // Apply the no_sanitize* attributes to SanOpts.
799 SanOpts.Mask &= ~no_sanitize_mask;
800 if (no_sanitize_mask & SanitizerKind::Address)
801 SanOpts.set(SanitizerKind::KernelAddress, false);
802 if (no_sanitize_mask & SanitizerKind::KernelAddress)
803 SanOpts.set(SanitizerKind::Address, false);
804 if (no_sanitize_mask & SanitizerKind::HWAddress)
805 SanOpts.set(SanitizerKind::KernelHWAddress, false);
806 if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
807 SanOpts.set(SanitizerKind::HWAddress, false);
808
809 if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
810 Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);
811
812 if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
813 Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
814
815 // Some passes need the non-negated no_sanitize attribute. Pass them on.
816 if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
817 if (no_sanitize_mask & SanitizerKind::Thread)
818 Fn->addFnAttr("no_sanitize_thread");
819 }
820 }
821
823 CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
824 } else {
825 // Apply sanitizer attributes to the function.
826 if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
827 Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
828 if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
829 SanitizerKind::KernelHWAddress))
830 Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
831 if (SanOpts.has(SanitizerKind::MemtagStack))
832 Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
833 if (SanOpts.has(SanitizerKind::Thread))
834 Fn->addFnAttr(llvm::Attribute::SanitizeThread);
835 if (SanOpts.has(SanitizerKind::Type))
836 Fn->addFnAttr(llvm::Attribute::SanitizeType);
837 if (SanOpts.has(SanitizerKind::NumericalStability))
838 Fn->addFnAttr(llvm::Attribute::SanitizeNumericalStability);
839 if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
840 Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
841 if (SanOpts.has(SanitizerKind::AllocToken))
842 Fn->addFnAttr(llvm::Attribute::SanitizeAllocToken);
843 }
844 if (SanOpts.has(SanitizerKind::SafeStack))
845 Fn->addFnAttr(llvm::Attribute::SafeStack);
846 if (SanOpts.has(SanitizerKind::ShadowCallStack))
847 Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
848
849 if (SanOpts.has(SanitizerKind::Realtime))
850 if (FD && FD->getASTContext().hasAnyFunctionEffects())
851 for (const FunctionEffectWithCondition &Fe : FD->getFunctionEffects()) {
852 if (Fe.Effect.kind() == FunctionEffect::Kind::NonBlocking)
853 Fn->addFnAttr(llvm::Attribute::SanitizeRealtime);
854 else if (Fe.Effect.kind() == FunctionEffect::Kind::Blocking)
855 Fn->addFnAttr(llvm::Attribute::SanitizeRealtimeBlocking);
856 }
857
858 // Apply fuzzing attribute to the function.
859 if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
860 Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
861
862 // Ignore TSan memory acesses from within ObjC/ObjC++ dealloc, initialize,
863 // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time.
864 if (SanOpts.has(SanitizerKind::Thread)) {
865 if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
866 const IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
867 if (OMD->getMethodFamily() == OMF_dealloc ||
868 OMD->getMethodFamily() == OMF_initialize ||
869 (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
871 }
872 }
873 }
874
875 // Ignore unrelated casts in STL allocate() since the allocator must cast
876 // from void* to T* before object initialization completes. Don't match on the
877 // namespace because not all allocators are in std::
878 if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
880 SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
881 }
882
883 // Ignore null checks in coroutine functions since the coroutines passes
884 // are not aware of how to move the extra UBSan instructions across the split
885 // coroutine boundaries.
886 if (D && SanOpts.has(SanitizerKind::Null))
887 if (FD && FD->getBody() &&
888 FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
889 SanOpts.Mask &= ~SanitizerKind::Null;
890
891 // Apply xray attributes to the function (as a string, for now)
892 bool AlwaysXRayAttr = false;
893 if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
894 if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
896 CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
898 if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
899 Fn->addFnAttr("function-instrument", "xray-always");
900 AlwaysXRayAttr = true;
901 }
902 if (XRayAttr->neverXRayInstrument())
903 Fn->addFnAttr("function-instrument", "xray-never");
904 if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
906 Fn->addFnAttr("xray-log-args",
907 llvm::utostr(LogArgs->getArgumentCount()));
908 }
909 } else {
910 if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
911 Fn->addFnAttr(
912 "xray-instruction-threshold",
913 llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
914 }
915
917 if (CGM.getCodeGenOpts().XRayIgnoreLoops)
918 Fn->addFnAttr("xray-ignore-loops");
919
920 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
922 Fn->addFnAttr("xray-skip-exit");
923
924 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
926 Fn->addFnAttr("xray-skip-entry");
927
928 auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
929 if (FuncGroups > 1) {
930 auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
931 CurFn->getName().bytes_end());
932 auto Group = crc32(FuncName) % FuncGroups;
933 if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
934 !AlwaysXRayAttr)
935 Fn->addFnAttr("function-instrument", "xray-never");
936 }
937 }
938
939 if (CGM.getCodeGenOpts().getProfileInstr() !=
940 llvm::driver::ProfileInstrKind::ProfileNone) {
941 switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
943 Fn->addFnAttr(llvm::Attribute::SkipProfile);
944 break;
946 Fn->addFnAttr(llvm::Attribute::NoProfile);
947 break;
949 break;
950 }
951 }
952
953 unsigned Count, Offset;
954 StringRef Section;
955 if (const auto *Attr =
956 D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
957 Count = Attr->getCount();
958 Offset = Attr->getOffset();
959 Section = Attr->getSection();
960 } else {
961 Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
962 Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
963 }
964 if (Section.empty())
965 Section = CGM.getCodeGenOpts().PatchableFunctionEntrySection;
966 if (Count && Offset <= Count) {
967 Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
968 if (Offset)
969 Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
970 if (!Section.empty())
971 Fn->addFnAttr("patchable-function-entry-section", Section);
972 }
973 // Instruct that functions for COFF/CodeView targets should start with a
974 // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
975 // backends as they don't need it -- instructions on these architectures are
976 // always atomically patchable at runtime.
977 if (CGM.getCodeGenOpts().HotPatch &&
978 getContext().getTargetInfo().getTriple().isX86() &&
979 getContext().getTargetInfo().getTriple().getEnvironment() !=
980 llvm::Triple::CODE16)
981 Fn->addFnAttr("patchable-function", "prologue-short-redirect");
982
983 // Add no-jump-tables value.
984 if (CGM.getCodeGenOpts().NoUseJumpTables)
985 Fn->addFnAttr("no-jump-tables", "true");
986
987 // Add no-inline-line-tables value.
988 if (CGM.getCodeGenOpts().NoInlineLineTables)
989 Fn->addFnAttr("no-inline-line-tables");
990
991 // Add profile-sample-accurate value.
992 if (CGM.getCodeGenOpts().ProfileSampleAccurate)
993 Fn->addFnAttr("profile-sample-accurate");
994
995 if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
996 Fn->addFnAttr("use-sample-profile");
997
998 if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
999 Fn->addFnAttr("cfi-canonical-jump-table");
1000
1001 if (D && D->hasAttr<NoProfileFunctionAttr>())
1002 Fn->addFnAttr(llvm::Attribute::NoProfile);
1003
1004 if (D && D->hasAttr<HybridPatchableAttr>())
1005 Fn->addFnAttr(llvm::Attribute::HybridPatchable);
1006
1007 if (D) {
1008 // Function attributes take precedence over command line flags.
1009 if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
1010 switch (A->getThunkType()) {
1011 case FunctionReturnThunksAttr::Kind::Keep:
1012 break;
1013 case FunctionReturnThunksAttr::Kind::Extern:
1014 Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
1015 break;
1016 }
1017 } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
1018 Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
1019 }
1020
1021 if (FD && (getLangOpts().OpenCL ||
1022 (getLangOpts().CUDA &&
1023 getContext().getTargetInfo().getTriple().isSPIRV()) ||
1024 ((getLangOpts().HIP || getLangOpts().OffloadViaLLVM) &&
1025 getLangOpts().CUDAIsDevice))) {
1026 // Add metadata for a kernel function.
1027 EmitKernelMetadata(FD, Fn);
1028 }
1029
1030 if (FD && FD->hasAttr<ClspvLibclcBuiltinAttr>()) {
1031 Fn->setMetadata("clspv_libclc_builtin",
1032 llvm::MDNode::get(getLLVMContext(), {}));
1033 }
1034
1035 // If we are checking function types, emit a function type signature as
1036 // prologue data. Kernel functions have strict alignment requirements and
1037 // cannot be call indirectly so we do not instrument them.
1038 if (FD && SanOpts.has(SanitizerKind::Function) &&
1040 llvm::isCallableCC(Fn->getCallingConv())) {
1041 if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
1042 llvm::LLVMContext &Ctx = Fn->getContext();
1043 llvm::MDBuilder MDB(Ctx);
1044 Fn->setMetadata(
1045 llvm::LLVMContext::MD_func_sanitize,
1046 MDB.createRTTIPointerPrologue(
1047 PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
1048 }
1049 }
1050
1051 // If we're checking nullability, we need to know whether we can check the
1052 // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
1053 if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
1054 auto Nullability = FnRetTy->getNullability();
1055 if (Nullability && *Nullability == NullabilityKind::NonNull &&
1056 !FnRetTy->isRecordType()) {
1057 if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
1058 CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
1059 RetValNullabilityPrecondition =
1060 llvm::ConstantInt::getTrue(getLLVMContext());
1061 }
1062 }
1063
1064 // If we're in C++ mode and the function name is "main", it is guaranteed
1065 // to be norecurse by the standard (3.6.1.3 "The function main shall not be
1066 // used within a program").
1067 //
1068 // OpenCL C 2.0 v2.2-11 s6.9.i:
1069 // Recursion is not supported.
1070 //
1071 // HLSL
1072 // Recursion is not supported.
1073 //
1074 // SYCL v1.2.1 s3.10:
1075 // kernels cannot include RTTI information, exception classes,
1076 // recursive code, virtual functions or make use of C++ libraries that
1077 // are not compiled for the device.
1078 if (FD &&
1079 ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
1080 getLangOpts().HLSL || getLangOpts().SYCLIsDevice ||
1081 (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
1082 Fn->addFnAttr(llvm::Attribute::NoRecurse);
1083
1084 llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
1085 llvm::fp::ExceptionBehavior FPExceptionBehavior =
1086 ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
1087 Builder.setDefaultConstrainedRounding(RM);
1088 Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
1089 if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
1090 (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
1091 RM != llvm::RoundingMode::NearestTiesToEven))) {
1092 Builder.setIsFPConstrained(true);
1093 Fn->addFnAttr(llvm::Attribute::StrictFP);
1094 }
1095
1096 // If a custom alignment is used, force realigning to this alignment on
1097 // any main function which certainly will need it.
1098 if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
1099 CGM.getCodeGenOpts().StackAlignment))
1100 Fn->addFnAttr("stackrealign");
1101
1102 // "main" doesn't need to zero out call-used registers.
1103 if (FD && FD->isMain())
1104 Fn->removeFnAttr("zero-call-used-regs");
1105
1106 // Add vscale_range attribute if appropriate.
1107 llvm::StringMap<bool> FeatureMap;
1108 auto IsArmStreaming = TargetInfo::ArmStreamingKind::NotStreaming;
1109 if (FD) {
1110 getContext().getFunctionFeatureMap(FeatureMap, FD);
1111 if (const auto *T = FD->getType()->getAs<FunctionProtoType>())
1112 if (T->getAArch64SMEAttributes() &
1115
1116 if (IsArmStreamingFunction(FD, true))
1118 }
1119 std::optional<std::pair<unsigned, unsigned>> VScaleRange =
1120 getContext().getTargetInfo().getVScaleRange(getLangOpts(), IsArmStreaming,
1121 &FeatureMap);
1122 if (VScaleRange) {
1123 CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
1124 getLLVMContext(), VScaleRange->first, VScaleRange->second));
1125 }
1126
1127 llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
1128
1129 // Create a marker to make it easy to insert allocas into the entryblock
1130 // later. Don't create this with the builder, because we don't want it
1131 // folded.
1132 llvm::Value *Poison = llvm::PoisonValue::get(Int32Ty);
1133 AllocaInsertPt = new llvm::BitCastInst(Poison, Int32Ty, "allocapt", EntryBB);
1134
1136
1137 Builder.SetInsertPoint(EntryBB);
1138
1139 // If we're checking the return value, allocate space for a pointer to a
1140 // precise source location of the checked return statement.
1141 if (requiresReturnValueCheck()) {
1142 ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
1143 Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
1144 ReturnLocation);
1145 }
1146
1147 // Emit subprogram debug descriptor.
1148 if (CGDebugInfo *DI = getDebugInfo()) {
1149 // Reconstruct the type from the argument list so that implicit parameters,
1150 // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
1151 // convention.
1152 DI->emitFunctionStart(GD, Loc, StartLoc,
1153 DI->getFunctionType(FD, RetTy, Args), CurFn,
1155 }
1156
1158 if (CGM.getCodeGenOpts().InstrumentFunctions)
1159 CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
1160 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
1161 CurFn->addFnAttr("instrument-function-entry-inlined",
1162 "__cyg_profile_func_enter");
1163 if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
1164 CurFn->addFnAttr("instrument-function-entry-inlined",
1165 "__cyg_profile_func_enter_bare");
1166 }
1167
1168 // Since emitting the mcount call here impacts optimizations such as function
1169 // inlining, we just add an attribute to insert a mcount call in backend.
1170 // The attribute "counting-function" is set to mcount function name which is
1171 // architecture dependent.
1172 if (CGM.getCodeGenOpts().InstrumentForProfiling) {
1173 // Calls to fentry/mcount should not be generated if function has
1174 // the no_instrument_function attribute.
1175 if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
1176 if (CGM.getCodeGenOpts().CallFEntry)
1177 Fn->addFnAttr("fentry-call", "true");
1178 else {
1179 Fn->addFnAttr("instrument-function-entry-inlined",
1180 getTarget().getMCountName());
1181 }
1182 if (CGM.getCodeGenOpts().MNopMCount) {
1183 if (!CGM.getCodeGenOpts().CallFEntry)
1184 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1185 << "-mnop-mcount" << "-mfentry";
1186 Fn->addFnAttr("mnop-mcount");
1187 }
1188
1189 if (CGM.getCodeGenOpts().RecordMCount) {
1190 if (!CGM.getCodeGenOpts().CallFEntry)
1191 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1192 << "-mrecord-mcount" << "-mfentry";
1193 Fn->addFnAttr("mrecord-mcount");
1194 }
1195 }
1196 }
1197
1198 if (CGM.getCodeGenOpts().PackedStack) {
1199 if (getContext().getTargetInfo().getTriple().getArch() !=
1200 llvm::Triple::systemz)
1201 CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1202 << "-mpacked-stack";
1203 Fn->addFnAttr("packed-stack");
1204 }
1205
1206 if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
1207 !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
1208 Fn->addFnAttr("warn-stack-size",
1209 std::to_string(CGM.getCodeGenOpts().WarnStackSize));
1210
1211 if (RetTy->isVoidType()) {
1212 // Void type; nothing to return.
1214
1215 // Count the implicit return.
1216 if (!endsWithReturn(D))
1217 ++NumReturnExprs;
1218 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1219 // Indirect return; emit returned value directly into sret slot.
1220 // This reduces code size, and affects correctness in C++.
1221 auto AI = CurFn->arg_begin();
1222 if (CurFnInfo->getReturnInfo().isSRetAfterThis())
1223 ++AI;
1225 &*AI, RetTy, CurFnInfo->getReturnInfo().getIndirectAlign(), false,
1226 nullptr, nullptr, KnownNonNull);
1227 if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
1229 CreateDefaultAlignTempAlloca(ReturnValue.getType(), "result.ptr");
1230 Builder.CreateStore(ReturnValue.emitRawPointer(*this),
1232 }
1233 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
1234 !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
1235 // Load the sret pointer from the argument struct and return into that.
1236 unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1237 llvm::Function::arg_iterator EI = CurFn->arg_end();
1238 --EI;
1239 llvm::Value *Addr = Builder.CreateStructGEP(
1240 CurFnInfo->getArgStruct(), &*EI, Idx);
1241 llvm::Type *Ty =
1242 cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
1244 Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
1246 CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
1247 } else {
1248 ReturnValue = CreateIRTempWithoutCast(RetTy, "retval");
1249
1250 // Tell the epilog emitter to autorelease the result. We do this
1251 // now so that various specialized functions can suppress it
1252 // during their IR-generation.
1253 if (getLangOpts().ObjCAutoRefCount &&
1254 !CurFnInfo->isReturnsRetained() &&
1255 RetTy->isObjCRetainableType())
1256 AutoreleaseResult = true;
1257 }
1258
1260
1261 PrologueCleanupDepth = EHStack.stable_begin();
1262
1263 // Emit OpenMP specific initialization of the device functions.
1264 if (getLangOpts().OpenMP && CurCodeDecl)
1265 CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
1266
1267 if (FD && getLangOpts().HLSL) {
1268 // Handle emitting HLSL entry functions.
1269 if (FD->hasAttr<HLSLShaderAttr>()) {
1270 CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);
1271 }
1272 }
1273
1275
1276 if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
1277 MD && !MD->isStatic()) {
1278 bool IsInLambda =
1279 MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
1281 CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
1282 if (IsInLambda) {
1283 // We're in a lambda; figure out the captures.
1287 // If the lambda captures the object referred to by '*this' - either by
1288 // value or by reference, make sure CXXThisValue points to the correct
1289 // object.
1290
1291 // Get the lvalue for the field (which is a copy of the enclosing object
1292 // or contains the address of the enclosing object).
1294 if (!LambdaThisCaptureField->getType()->isPointerType()) {
1295 // If the enclosing object was captured by value, just use its
1296 // address. Sign this pointer.
1297 CXXThisValue = ThisFieldLValue.getPointer(*this);
1298 } else {
1299 // Load the lvalue pointed to by the field, since '*this' was captured
1300 // by reference.
1301 CXXThisValue =
1302 EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1303 }
1304 }
1305 for (auto *FD : MD->getParent()->fields()) {
1306 if (FD->hasCapturedVLAType()) {
1307 auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1309 auto VAT = FD->getCapturedVLAType();
1310 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1311 }
1312 }
1313 } else if (MD->isImplicitObjectMemberFunction()) {
1314 // Not in a lambda; just use 'this' from the method.
1315 // FIXME: Should we generate a new load for each use of 'this'? The
1316 // fast register allocator would be happier...
1317 CXXThisValue = CXXABIThisValue;
1318 }
1319
1320 // Check the 'this' pointer once per function, if it's available.
1321 if (CXXABIThisValue) {
1322 SanitizerSet SkippedChecks;
1323 SkippedChecks.set(SanitizerKind::ObjectSize, true);
1324 QualType ThisTy = MD->getThisType();
1325
1326 // If this is the call operator of a lambda with no captures, it
1327 // may have a static invoker function, which may call this operator with
1328 // a null 'this' pointer.
1330 SkippedChecks.set(SanitizerKind::Null, true);
1331
1334 Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
1335 }
1336 }
1337
1338 // If any of the arguments have a variably modified type, make sure to
1339 // emit the type size, but only if the function is not naked. Naked functions
1340 // have no prolog to run this evaluation.
1341 if (!FD || !FD->hasAttr<NakedAttr>()) {
1342 for (const VarDecl *VD : Args) {
1343 // Dig out the type as written from ParmVarDecls; it's unclear whether
1344 // the standard (C99 6.9.1p10) requires this, but we're following the
1345 // precedent set by gcc.
1346 QualType Ty;
1347 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1348 Ty = PVD->getOriginalType();
1349 else
1350 Ty = VD->getType();
1351
1352 if (Ty->isVariablyModifiedType())
1354 }
1355 }
1356 // Emit a location at the end of the prologue.
1357 if (CGDebugInfo *DI = getDebugInfo())
1358 DI->EmitLocation(Builder, StartLoc);
1359 // TODO: Do we need to handle this in two places like we do with
1360 // target-features/target-cpu?
1361 if (CurFuncDecl)
1362 if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1363 LargestVectorWidth = VecWidth->getVectorWidth();
1364
1365 if (CGM.shouldEmitConvergenceTokens())
1366 ConvergenceTokenStack.push_back(getOrEmitConvergenceEntryToken(CurFn));
1367}
1368
  // NOTE(review): the EmitFunctionBody signature (source lines 1369-1371) is
  // missing from this extraction; this is the body-dispatch helper that emits
  // a function's top-level statement.
 1372 if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
  // NOTE(review): source line 1373 is missing here; presumably the call that
  // emits the compound statement S without opening a new lexical scope.
 1374 else
  // Any non-compound body is emitted as an ordinary statement.
 1375 EmitStmt(Body);
 1376}
1377
 1378/// When instrumenting to collect profile data, the counts for some blocks
 1379/// such as switch cases need to not include the fall-through counts, so
 1380/// emit a branch around the instrumentation code. When not instrumenting,
 1381/// this just calls EmitBlock().
  // NOTE(review): the first signature line (source line 1382) is missing from
  // this extraction; presumably:
  //   void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
 1383 const Stmt *S) {
 1384 llvm::BasicBlock *SkipCountBB = nullptr;
  // Only create the skip branch when there is real fall-through (a current
  // insert point) and Clang-level profile instrumentation is enabled.
 1385 if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
 1386 // When instrumenting for profiling, the fallthrough to certain
 1387 // statements needs to skip over the instrumentation code so that we
 1388 // get an accurate count.
 1389 SkipCountBB = createBasicBlock("skipcount");
 1390 EmitBranch(SkipCountBB);
 1391 }
 1392 EmitBlock(BB);
 1393 uint64_t CurrentCount = getCurrentProfileCount();
  // NOTE(review): source lines 1394-1395 are missing from this extraction;
  // presumably the profile-counter increment for S and the restoration of
  // 'CurrentCount' — confirm against upstream before relying on this.
 1396 if (SkipCountBB)
 1397 EmitBlock(SkipCountBB);
 1398}
1399
1400/// Tries to mark the given function nounwind based on the
1401/// non-existence of any throwing calls within it. We believe this is
1402/// lightweight enough to do at -O0.
1403static void TryMarkNoThrow(llvm::Function *F) {
1404 // LLVM treats 'nounwind' on a function as part of the type, so we
1405 // can't do this on functions that can be overwritten.
1406 if (F->isInterposable()) return;
1407
1408 for (llvm::BasicBlock &BB : *F)
1409 for (llvm::Instruction &I : BB)
1410 if (I.mayThrow())
1411 return;
1412
1413 F->setDoesNotThrow();
1414}
1415
  // NOTE(review): the first signature line (source line 1416) is missing from
  // this extraction; presumably:
  //   QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
  // Builds the argument list for GD (including implicit 'this' and ABI
  // parameters) and returns the effective return type.
 1417 FunctionArgList &Args) {
 1418 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
 1419 QualType ResTy = FD->getReturnType();
 1420
  // Implicit object member functions get a 'this' parameter first; some ABIs
  // also change the return type ('this'-returning ctors/dtors, most-derived).
 1421 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
 1422 if (MD && MD->isImplicitObjectMemberFunction()) {
 1423 if (CGM.getCXXABI().HasThisReturn(GD))
 1424 ResTy = MD->getThisType();
 1425 else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
 1426 ResTy = CGM.getContext().VoidPtrTy;
 1427 CGM.getCXXABI().buildThisParam(*this, Args);
 1428 }
 1429
 1430 // The base version of an inheriting constructor whose constructed base is a
 1431 // virtual base is not passed any arguments (because it doesn't actually call
 1432 // the inherited constructor).
 1433 bool PassedParams = true;
 1434 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
 1435 if (auto Inherited = CD->getInheritedConstructor())
 1436 PassedParams =
 1437 getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
 1438
 1439 if (PassedParams) {
 1440 for (auto *Param : FD->parameters()) {
 1441 Args.push_back(Param);
  // pass_object_size parameters carry a hidden companion size argument.
 1442 if (!Param->hasAttr<PassObjectSizeAttr>())
 1443 continue;
 1444
  // NOTE(review): source line 1445 is missing from this extraction;
  // presumably the declaration of 'Implicit' (an ImplicitParamDecl::Create
  // call) whose argument list continues on the next two lines.
 1446 getContext(), Param->getDeclContext(), Param->getLocation(),
 1447 /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
 1448 SizeArguments[Param] = Implicit;
 1449 Args.push_back(Implicit);
 1450 }
 1451 }
 1452
  // Constructors/destructors may need extra ABI-specific parameters (e.g.
  // VTTs or variadic args) appended after the declared ones.
 1453 if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
 1454 CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
 1455
 1456 return ResTy;
 1457}
1458
 1459void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
 1460 const CGFunctionInfo &FnInfo) {
 1461 assert(Fn && "generating code for null Function");
 1462 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
 1463 CurGD = GD;
 1464
 1465 FunctionArgList Args;
 1466 QualType ResTy = BuildFunctionArgList(GD, Args);
 1467
 1468 CGM.getTargetCodeGenInfo().checkFunctionABI(CGM, FD);
 1469
 1470 if (FD->isInlineBuiltinDeclaration()) {
 1471 // When generating code for a builtin with an inline declaration, use a
 1472 // mangled name to hold the actual body, while keeping an external
 1473 // definition in case the function pointer is referenced somewhere.
 1474 std::string FDInlineName = (Fn->getName() + ".inline").str();
 1475 llvm::Module *M = Fn->getParent();
 1476 llvm::Function *Clone = M->getFunction(FDInlineName);
 1477 if (!Clone) {
 1478 Clone = llvm::Function::Create(Fn->getFunctionType(),
 1479 llvm::GlobalValue::InternalLinkage,
 1480 Fn->getAddressSpace(), FDInlineName, M);
 1481 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
 1482 }
 1483 Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
 1484 Fn = Clone;
 1485 } else {
 1486 // Detect the unusual situation where an inline version is shadowed by a
 1487 // non-inline version. In that case we should pick the external one
 1488 // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
 1489 // to detect that situation before we reach codegen, so do some late
 1490 // replacement.
 1491 for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
 1492 PD = PD->getPreviousDecl()) {
 1493 if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
 1494 std::string FDInlineName = (Fn->getName() + ".inline").str();
 1495 llvm::Module *M = Fn->getParent();
 1496 if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
 1497 Clone->replaceAllUsesWith(Fn);
 1498 Clone->eraseFromParent();
 1499 }
 1500 break;
 1501 }
 1502 }
 1503 }
 1504
 1505 // Check if we should generate debug info for this function.
 1506 if (FD->hasAttr<NoDebugAttr>()) {
 1507 // Clear non-distinct debug info that was possibly attached to the function
 1508 // due to an earlier declaration without the nodebug attribute
 1509 Fn->setSubprogram(nullptr);
 1510 // Disable debug info indefinitely for this function
 1511 DebugInfo = nullptr;
 1512 }
 1513 // Finalize function debug info on exit.
 1514 llvm::scope_exit Cleanup([this] {
 1515 if (CGDebugInfo *DI = getDebugInfo())
 1516 DI->completeFunction();
 1517 });
 1518
 1519 // The function might not have a body if we're generating thunks for a
 1520 // function declaration.
 1521 SourceRange BodyRange;
 1522 if (Stmt *Body = FD->getBody())
 1523 BodyRange = Body->getSourceRange();
 1524 else
 1525 BodyRange = FD->getLocation();
 1526 CurEHLocation = BodyRange.getEnd();
 1527
 1528 // Use the location of the start of the function to determine where
 1529 // the function definition is located. By default use the location
 1530 // of the declaration as the location for the subprogram. A function
 1531 // may lack a declaration in the source code if it is created by code
 1532 // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
 1533 SourceLocation Loc = FD->getLocation();
 1534
 1535 // If this is a function specialization then use the pattern body
 1536 // as the location for the function.
 1537 if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
 1538 if (SpecDecl->hasBody(SpecDecl))
 1539 Loc = SpecDecl->getLocation();
 1540
 1541 Stmt *Body = FD->getBody();
 1542
 1543 if (Body) {
 1544 // Coroutines always emit lifetime markers.
 1545 if (isa<CoroutineBodyStmt>(Body))
 1546 ShouldEmitLifetimeMarkers = true;
 1547
 1548 // Initialize helper which will detect jumps which can cause invalid
 1549 // lifetime markers.
 1550 if (ShouldEmitLifetimeMarkers)
 1551 Bypasses.Init(CGM, Body);
 1552 }
 1553
 1554 // Emit the standard function prologue.
 1555 StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
 1556
 1557 // Save parameters for coroutine function.
 1558 if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
 1559 llvm::append_range(FnArgs, FD->parameters());
 1560
 1561 // Ensure that the function adheres to the forward progress guarantee, which
 1562 // is required by certain optimizations.
 1563 // In C++11 and up, the attribute will be removed if the body contains a
 1564 // trivial empty loop.
  // NOTE(review): source line 1565 (the 'if' condition guarding this
  // attribute) is missing from this extraction — confirm against upstream.
 1566 CurFn->addFnAttr(llvm::Attribute::MustProgress);
 1567
 1568 // Generate the body of the function.
 1569 PGO->assignRegionCounters(GD, CurFn);
 1570 if (isa<CXXDestructorDecl>(FD))
 1571 EmitDestructorBody(Args);
 1572 else if (isa<CXXConstructorDecl>(FD))
 1573 EmitConstructorBody(Args);
 1574 else if (getLangOpts().CUDA &&
 1575 !getLangOpts().CUDAIsDevice &&
 1576 FD->hasAttr<CUDAGlobalAttr>())
 1577 CGM.getCUDARuntime().emitDeviceStub(*this, Args);
 1578 else if (isa<CXXMethodDecl>(FD) &&
 1579 cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
 1580 // The lambda static invoker function is special, because it forwards or
 1581 // clones the body of the function call operator (but is actually static).
  // NOTE(review): source line 1582 (the emission call for the static-invoker
  // body) is missing from this extraction.
 1583 } else if (isa<CXXMethodDecl>(FD) &&
  // NOTE(review): source line 1584 (one clause of this condition) is missing
  // from this extraction.
 1585 !FnInfo.isDelegateCall() &&
 1586 cast<CXXMethodDecl>(FD)->getParent()->getLambdaStaticInvoker() &&
 1587 hasInAllocaArg(cast<CXXMethodDecl>(FD))) {
 1588 // If emitting a lambda with static invoker on X86 Windows, change
 1589 // the call operator body.
 1590 // Make sure that this is a call operator with an inalloca arg and check
 1591 // for delegate call to make sure this is the original call op and not the
 1592 // new forwarding function for the static invoker.
  // NOTE(review): source line 1593 (the emission call for this case) is
  // missing from this extraction.
 1594 } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
 1595 (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
 1596 cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
 1597 // Implicit copy-assignment gets the same special treatment as implicit
 1598 // copy-constructors.
  // NOTE(review): source line 1599 (the implicit-assignment emission call)
  // is missing from this extraction.
 1600 } else if (DeviceKernelAttr::isOpenCLSpelling(
 1601 FD->getAttr<DeviceKernelAttr>()) &&
  // NOTE(review): source line 1602 (the final clause of this condition and
  // the opening brace) is missing from this extraction.
 1603 CallArgList CallArgs;
  // Forward every declared argument of the kernel into the stub call below.
 1604 for (unsigned i = 0; i < Args.size(); ++i) {
 1605 Address ArgAddr = GetAddrOfLocalVar(Args[i]);
 1606 QualType ArgQualType = Args[i]->getType();
 1607 RValue ArgRValue = convertTempToRValue(ArgAddr, ArgQualType, Loc);
 1608 CallArgs.add(ArgRValue, ArgQualType);
 1609 }
  // NOTE(review): source line 1610 (presumably the declaration of 'GDStub',
  // the stub GlobalDecl used below) is missing from this extraction.
 1611 const FunctionType *FT = cast<FunctionType>(FD->getType());
 1612 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FT);
 1613 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
 1614 CallArgs, FT, /*ChainCall=*/false);
 1615 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FnInfo);
 1616 llvm::Constant *GDStubFunctionPointer =
 1617 CGM.getRawFunctionPointer(GDStub, FTy);
 1618 CGCallee GDStubCallee = CGCallee::forDirect(GDStubFunctionPointer, GDStub);
 1619 EmitCall(FnInfo, GDStubCallee, ReturnValueSlot(), CallArgs, nullptr, false,
 1620 Loc);
 1621 } else if (Body) {
 1622 EmitFunctionBody(Body);
 1623 } else
 1624 llvm_unreachable("no definition for emitted function");
 1625
 1626 // C++11 [stmt.return]p2:
 1627 // Flowing off the end of a function [...] results in undefined behavior in
 1628 // a value-returning function.
 1629 // C11 6.9.1p12:
 1630 // If the '}' that terminates a function is reached, and the value of the
 1631 // function call is used by the caller, the behavior is undefined.
  // NOTE(review): source line 1632 (the opening of this 'if' condition) is
  // missing from this extraction.
 1633 !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
 1634 bool ShouldEmitUnreachable =
 1635 CGM.getCodeGenOpts().StrictReturn ||
 1636 !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
 1637 if (SanOpts.has(SanitizerKind::Return)) {
 1638 auto CheckOrdinal = SanitizerKind::SO_Return;
 1639 auto CheckHandler = SanitizerHandler::MissingReturn;
 1640 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
 1641 llvm::Value *IsFalse = Builder.getFalse();
 1642 EmitCheck(std::make_pair(IsFalse, CheckOrdinal), CheckHandler,
  // NOTE(review): source line 1643 (the remaining EmitCheck arguments) is
  // missing from this extraction.
 1644 } else if (ShouldEmitUnreachable) {
 1645 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
 1646 EmitTrapCall(llvm::Intrinsic::trap);
 1647 }
 1648 if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
 1649 Builder.CreateUnreachable();
 1650 Builder.ClearInsertionPoint();
 1651 }
 1652 }
 1653
 1654 // Emit the standard function epilogue.
 1655 FinishFunction(BodyRange.getEnd());
 1656
 1657 PGO->verifyCounterMap();
 1658
  // An explicit 'personality' attribute overrides the default EH personality.
 1659 if (CurCodeDecl->hasAttr<PersonalityAttr>()) {
 1660 StringRef Identifier =
 1661 CurCodeDecl->getAttr<PersonalityAttr>()->getRoutine()->getName();
 1662 llvm::FunctionCallee PersonalityRoutine =
 1663 CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
 1664 Identifier, {}, /*local=*/true);
 1665 Fn->setPersonalityFn(cast<llvm::Constant>(PersonalityRoutine.getCallee()));
 1666 }
 1667
 1668 // If we haven't marked the function nothrow through other means, do
 1669 // a quick pass now to see if we can.
 1670 if (!CurFn->doesNotThrow())
  // NOTE(review): source line 1671 (presumably the TryMarkNoThrow(CurFn)
  // call) is missing from this extraction.
 1672}
1673
1674/// ContainsLabel - Return true if the statement contains a label in it. If
1675/// this statement is not executed normally, it not containing a label means
1676/// that we can just remove the code.
1677bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1678 // Null statement, not a label!
1679 if (!S) return false;
1680
1681 // If this is a label, we have to emit the code, consider something like:
1682 // if (0) { ... foo: bar(); } goto foo;
1683 //
1684 // TODO: If anyone cared, we could track __label__'s, since we know that you
1685 // can't jump to one from outside their declared region.
1686 if (isa<LabelStmt>(S))
1687 return true;
1688
1689 // If this is a case/default statement, and we haven't seen a switch, we have
1690 // to emit the code.
1691 if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1692 return true;
1693
1694 // If this is a switch statement, we want to ignore cases below it.
1695 if (isa<SwitchStmt>(S))
1696 IgnoreCaseStmts = true;
1697
1698 // Scan subexpressions for verboten labels.
1699 for (const Stmt *SubStmt : S->children())
1700 if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1701 return true;
1702
1703 return false;
1704}
1705
 1706/// containsBreak - Return true if the statement contains a break out of it.
 1707/// If the statement (recursively) contains a switch or loop with a break
 1708/// inside of it, this is fine.
  // NOTE(review): the signature line (source line 1709) is missing from this
  // extraction; presumably:
  //   bool CodeGenFunction::containsBreak(const Stmt *S) {
 1710 // Null statement, not a label!
 1711 if (!S) return false;
 1712
 1713 // If this is a switch or loop that defines its own break scope, then we can
 1714 // include it and anything inside of it.
 1715 if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
 1716 isa<ForStmt>(S))
 1717 return false;
 1718
  // A bare break here would escape the statement being analyzed.
 1719 if (isa<BreakStmt>(S))
 1720 return true;
 1721
 1722 // Scan subexpressions for verboten breaks.
 1723 for (const Stmt *SubStmt : S->children())
 1724 if (containsBreak(SubStmt))
 1725 return true;
 1726
 1727 return false;
 1728}
1729
  // NOTE(review): the signature line (source line 1730) is missing from this
  // extraction; presumably:
  //   bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
  // Conservatively answers whether S could introduce a declaration into the
  // CURRENT scope (as opposed to a scope of its own).
 1731 if (!S) return false;
 1732
 1733 // Some statement kinds add a scope and thus never add a decl to the current
 1734 // scope. Note, this list is longer than the list of statements that might
 1735 // have an unscoped decl nested within them, but this way is conservatively
 1736 // correct even if more statement kinds are added.
 1737 if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
  // NOTE(review): source lines 1738-1740 (the remainder of this scoped
  // statement-kind list) are missing from this extraction.
 1741 return false;
 1742
  // A declaration statement directly adds to the current scope.
 1743 if (isa<DeclStmt>(S))
 1744 return true;
 1745
  // Otherwise, recurse into sub-statements.
 1746 for (const Stmt *SubStmt : S->children())
 1747 if (mightAddDeclToScope(SubStmt))
 1748 return true;
 1749
 1750 return false;
 1751}
1752
 1753/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
 1754/// to a constant, or if it does but contains a label, return false. If it
 1755/// constant folds return true and set the boolean result in Result.
  // NOTE(review): the first signature line (source line 1756) is missing from
  // this extraction; presumably:
  //   bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
 1757 bool &ResultBool,
 1758 bool AllowLabels) {
 1759 // If MC/DC is enabled, disable folding so that we can instrument all
 1760 // conditions to yield complete test vectors. We still keep track of
 1761 // folded conditions during region mapping and visualization.
 1762 if (!AllowLabels && CGM.getCodeGenOpts().hasProfileClangInstr() &&
 1763 CGM.getCodeGenOpts().MCDCCoverage)
 1764 return false;
 1765
  // Delegate to the APSInt overload, then narrow the result to bool.
 1766 llvm::APSInt ResultInt;
 1767 if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
 1768 return false;
 1769
 1770 ResultBool = ResultInt.getBoolValue();
 1771 return true;
 1772}
1773
1774/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1775/// to a constant, or if it does but contains a label, return false. If it
1776/// constant folds return true and set the folded value.
/// NOTE(review): original lines 1777 (first signature line) and 1782
/// (presumably the Expr::EvalResult Result; declaration) are missing from
/// this extraction.
1778 llvm::APSInt &ResultInt,
1779 bool AllowLabels) {
1780 // FIXME: Rename and handle conversion of other evaluatable things
1781 // to bool.
1783 if (!Cond->EvaluateAsInt(Result, getContext()))
1784 return false; // Not foldable, not integer or not fully evaluatable.
1785
1786 llvm::APSInt Int = Result.Val.getInt();
// Folding away a labeled statement would make the label unreachable.
1787 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1788 return false; // Contains a label.
1789
// Tell PGO the statement may be elided so its counters aren't reported stale.
1790 PGO->markStmtMaybeUsed(Cond);
1791 ResultInt = std::move(Int);
1792 return true;
1793}
1794
1795/// Strip parentheses and simplistic logical-NOT operators.
/// NOTE(review): original lines 1796 (signature) and 1799-1800 (the argument
/// list of the IgnoreExprNodes call) were dropped by the doc extraction.
1797 while (true) {
// Peel one layer per iteration; stop when stripping makes no progress.
1798 const Expr *SC = IgnoreExprNodes(
1801 if (C == SC)
1802 return SC;
1803 C = SC;
1804 }
1805}
1806
1807/// Determine whether the given condition is an instrumentable condition
1808/// (i.e. no "&&" or "||").
/// NOTE(review): original line 1809 (the function signature) is missing from
/// this extraction.
1810 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(stripCond(C));
// After stripping parens/LNot, a condition is instrumentable unless it is
// itself a logical binary operator.
1811 return (!BOp || !BOp->isLogicalOp());
1812}
1813
1814/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1815/// increments a profile counter based on the semantics of the given logical
1816/// operator opcode. This is used to instrument branch condition coverage for
1817/// logical operators.
/// NOTE(review): the doc extraction dropped original lines 1818 (the function
/// signature), 1885 and 1893 (presumably the skip-counter and main counter
/// increments); the numbering gaps below mark where they belong.
1819 const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1820 llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1821 Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1822 // If not instrumenting, just emit a branch.
1823 bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1824 if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1825 return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1826
// Statement whose counter is bumped: the explicit index if given, else Cond.
1827 const Stmt *CntrStmt = (CntrIdx ? CntrIdx : Cond);
1828
1829 llvm::BasicBlock *ThenBlock = nullptr;
1830 llvm::BasicBlock *ElseBlock = nullptr;
1831 llvm::BasicBlock *NextBlock = nullptr;
1832
1833 // Create the block we'll use to increment the appropriate counter.
1834 llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1835
// Only materialize a skip block when a skip counter exists for this stmt.
1836 llvm::BasicBlock *SkipIncrBlock =
1837 (hasSkipCounter(CntrStmt) ? createBasicBlock("lop.rhsskip") : nullptr);
1838 llvm::BasicBlock *SkipNextBlock = nullptr;
1839
1840 // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1841 // means we need to evaluate the condition and increment the counter on TRUE:
1842 //
1843 // if (Cond)
1844 // goto CounterIncrBlock;
1845 // else
1846 // goto FalseBlock;
1847 //
1848 // CounterIncrBlock:
1849 // Counter++;
1850 // goto TrueBlock;
1851
1852 if (LOp == BO_LAnd) {
1853 SkipNextBlock = FalseBlock;
1854 ThenBlock = CounterIncrBlock;
1855 ElseBlock = (SkipIncrBlock ? SkipIncrBlock : SkipNextBlock);
1856 NextBlock = TrueBlock;
1857 }
1858
1859 // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1860 // we need to evaluate the condition and increment the counter on FALSE:
1861 //
1862 // if (Cond)
1863 // goto TrueBlock;
1864 // else
1865 // goto CounterIncrBlock;
1866 //
1867 // CounterIncrBlock:
1868 // Counter++;
1869 // goto FalseBlock;
1870
1871 else if (LOp == BO_LOr) {
1872 SkipNextBlock = TrueBlock;
1873 ThenBlock = (SkipIncrBlock ? SkipIncrBlock : SkipNextBlock);
1874 ElseBlock = CounterIncrBlock;
1875 NextBlock = FalseBlock;
1876 } else {
1877 llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1878 }
1879
1880 // Emit Branch based on condition.
1881 EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1882
1883 if (SkipIncrBlock) {
1884 EmitBlock(SkipIncrBlock);
// NOTE(review): original line 1885 (presumably the skip-counter increment)
// is missing here.
1886 EmitBranch(SkipNextBlock);
1887 }
1888
1889 // Emit the block containing the counter increment(s).
1890 EmitBlock(CounterIncrBlock);
1891
1892 // Increment corresponding counter; if index not provided, use Cond as index.
// NOTE(review): original line 1893 (the counter increment itself) is missing.
1894
1895 // Go to the next block.
1896 EmitBranch(NextBlock);
1897}
1898
1899/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1900/// statement) to the specified blocks. Based on the condition, this might try
1901/// to simplify the codegen of the conditional based on the branch.
1902/// \param LH The value of the likelihood attribute on the True branch.
1903/// \param ConditionalOp Used by MC/DC code coverage to track the result of the
1904/// ConditionalOperator (ternary) through a recursive call for the operator's
1905/// LHS and RHS nodes.
/// NOTE(review): the doc extraction dropped several hyperlinked lines of this
/// function (originals 1906, 1958, 1964, 2019, 2025, 2045, 2066, 2082, 2092,
/// 2132, 2172 and 2177); the numbering gaps below mark where they belong.
/// Verify against upstream before relying on this text.
1907 const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock,
1908 uint64_t TrueCount, Stmt::Likelihood LH, const Expr *ConditionalOp,
1909 const VarDecl *ConditionalDecl) {
1910 Cond = Cond->IgnoreParens();
1911
1912 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1913 bool HasSkip = hasSkipCounter(CondBOp);
1914
1915 // Handle X && Y in a condition.
1916 if (CondBOp->getOpcode() == BO_LAnd) {
1917 // If we have "1 && X", simplify the code. "0 && X" would have constant
1918 // folded if the case was simple enough.
1919 bool ConstantBool = false;
1920 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1921 ConstantBool) {
1922 // br(1 && X) -> br(X).
1923 incrementProfileCounter(CondBOp);
1924 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1925 FalseBlock, TrueCount, LH);
1926 return;
1927 }
1928
1929 // If we have "X && 1", simplify the code to use an uncond branch.
1930 // "X && 0" would have been constant folded to 0.
1931 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1932 ConstantBool) {
1933 // br(X && 1) -> br(X).
1934 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1935 FalseBlock, TrueCount, LH, CondBOp);
1936 return;
1937 }
1938
1939 // Emit the LHS as a conditional. If the LHS conditional is false, we
1940 // want to jump to the FalseBlock.
1941 llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1942 llvm::BasicBlock *LHSFalse =
1943 (HasSkip ? createBasicBlock("land.lhsskip") : FalseBlock);
1944 // The counter tells us how often we evaluate RHS, and all of TrueCount
1945 // can be propagated to that branch.
1946 uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1947
1948 ConditionalEvaluation eval(*this);
1949 {
1950 ApplyDebugLocation DL(*this, Cond);
1951 // Propagate the likelihood attribute like __builtin_expect
1952 // __builtin_expect(X && Y, 1) -> X and Y are likely
1953 // __builtin_expect(X && Y, 0) -> only Y is unlikely
1954 EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, LHSFalse, RHSCount,
1955 LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1956 if (HasSkip) {
1957 EmitBlock(LHSFalse);
// NOTE(review): original line 1958 (presumably the skip-counter increment)
// is missing here.
1959 EmitBranch(FalseBlock);
1960 }
1961 EmitBlock(LHSTrue);
1962 }
1963
// NOTE(review): original line 1964 is missing here.
1965 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1966
1967 // Any temporaries created here are conditional.
1968 eval.begin(*this);
1969 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1970 FalseBlock, TrueCount, LH);
1971 eval.end(*this);
1972 return;
1973 }
1974
1975 if (CondBOp->getOpcode() == BO_LOr) {
1976 // If we have "0 || X", simplify the code. "1 || X" would have constant
1977 // folded if the case was simple enough.
1978 bool ConstantBool = false;
1979 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1980 !ConstantBool) {
1981 // br(0 || X) -> br(X).
1982 incrementProfileCounter(CondBOp);
1983 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1984 FalseBlock, TrueCount, LH);
1985 return;
1986 }
1987
1988 // If we have "X || 0", simplify the code to use an uncond branch.
1989 // "X || 1" would have been constant folded to 1.
1990 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1991 !ConstantBool) {
1992 // br(X || 0) -> br(X).
1993 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1994 FalseBlock, TrueCount, LH, CondBOp);
1995 return;
1996 }
1997 // Emit the LHS as a conditional. If the LHS conditional is true, we
1998 // want to jump to the TrueBlock.
1999 llvm::BasicBlock *LHSTrue =
2000 (HasSkip ? createBasicBlock("lor.lhsskip") : TrueBlock);
2001 llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
2002 // We have the count for entry to the RHS and for the whole expression
2003 // being true, so we can divy up True count between the short circuit and
2004 // the RHS.
2005 uint64_t LHSCount =
2006 getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
2007 uint64_t RHSCount = TrueCount - LHSCount;
2008
2009 ConditionalEvaluation eval(*this);
2010 {
2011 // Propagate the likelihood attribute like __builtin_expect
2012 // __builtin_expect(X || Y, 1) -> only Y is likely
2013 // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
2014 ApplyDebugLocation DL(*this, Cond);
2015 EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, LHSFalse, LHSCount,
2016 LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
2017 if (HasSkip) {
2018 EmitBlock(LHSTrue);
// NOTE(review): original line 2019 (presumably the skip-counter increment)
// is missing here.
2020 EmitBranch(TrueBlock);
2021 }
2022 EmitBlock(LHSFalse);
2023 }
2024
// NOTE(review): original line 2025 is missing here.
2026 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
2027
2028 // Any temporaries created here are conditional.
2029 eval.begin(*this);
2030 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
2031 RHSCount, LH);
2032
2033 eval.end(*this);
2034 return;
2035 }
2036 }
2037
2038 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
2039 // br(!x, t, f) -> br(x, f, t)
2040 // Avoid doing this optimization when instrumenting a condition for MC/DC.
2041 // LNot is taken as part of the condition for simplicity, and changing its
2042 // sense negatively impacts test vector tracking.
2043 bool MCDCCondition = CGM.getCodeGenOpts().hasProfileClangInstr() &&
2044 CGM.getCodeGenOpts().MCDCCoverage &&
// NOTE(review): original line 2045 (the final conjunct of this condition)
// is missing here.
2046 if (CondUOp->getOpcode() == UO_LNot && !MCDCCondition) {
2047 // Negate the count.
2048 uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
2049 // The values of the enum are chosen to make this negation possible.
2050 LH = static_cast<Stmt::Likelihood>(-LH);
2051 // Negate the condition and swap the destination blocks.
2052 return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
2053 FalseCount, LH);
2054 }
2055 }
2056
2057 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
2058 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
2059 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
2060 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
2061
2062 // The ConditionalOperator itself has no likelihood information for its
2063 // true and false branches. This matches the behavior of __builtin_expect.
2064 ConditionalEvaluation cond(*this);
2065 EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
// NOTE(review): original line 2066 (the remaining arguments of this call)
// is missing here.
2067
2068 // When computing PGO branch weights, we only know the overall count for
2069 // the true block. This code is essentially doing tail duplication of the
2070 // naive code-gen, introducing new edges for which counts are not
2071 // available. Divide the counts proportionally between the LHS and RHS of
2072 // the conditional operator.
2073 uint64_t LHSScaledTrueCount = 0;
2074 if (TrueCount) {
2075 double LHSRatio =
2076 getProfileCount(CondOp) / (double)getCurrentProfileCount();
2077 LHSScaledTrueCount = TrueCount * LHSRatio;
2078 }
2079
2080 cond.begin(*this);
2081 EmitBlock(LHSBlock);
// NOTE(review): original line 2082 is missing here.
2083 {
2084 ApplyDebugLocation DL(*this, Cond);
2085 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
2086 LHSScaledTrueCount, LH, CondOp);
2087 }
2088 cond.end(*this);
2089
2090 cond.begin(*this);
2091 EmitBlock(RHSBlock);
// NOTE(review): original line 2092 is missing here.
2093 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
2094 TrueCount - LHSScaledTrueCount, LH, CondOp);
2095 cond.end(*this);
2096
2097 return;
2098 }
2099
2100 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
2101 // Conditional operator handling can give us a throw expression as a
2102 // condition for a case like:
2103 // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
2104 // Fold this to:
2105 // br(c, throw x, br(y, t, f))
2106 EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
2107 return;
2108 }
2109
2110 // Emit the code with the fully general case.
2111 llvm::Value *CondV;
2112 {
2113 ApplyDebugLocation DL(*this, Cond);
2114 CondV = EvaluateExprAsBool(Cond);
2115 }
2116
2117 MaybeEmitDeferredVarDeclInit(ConditionalDecl);
2118
2119 // If not at the top of the logical operator nest, update MCDC temp with the
2120 // boolean result of the evaluated condition.
2121 {
2122 const Expr *MCDCBaseExpr = Cond;
2123 // When a nested ConditionalOperator (ternary) is encountered in a boolean
2124 // expression, MC/DC tracks the result of the ternary, and this is tied to
2125 // the ConditionalOperator expression and not the ternary's LHS or RHS. If
2126 // this is the case, the ConditionalOperator expression is passed through
2127 // the ConditionalOp parameter and then used as the MCDC base expression.
2128 if (ConditionalOp)
2129 MCDCBaseExpr = ConditionalOp;
2130
2131 if (isMCDCBranchExpr(stripCond(MCDCBaseExpr)) &&
// NOTE(review): original line 2132 (the second conjunct of this condition)
// is missing here.
2133 maybeUpdateMCDCCondBitmap(MCDCBaseExpr, CondV);
2134 }
2135
2136 llvm::MDNode *Weights = nullptr;
2137 llvm::MDNode *Unpredictable = nullptr;
2138
2139 // If the branch has a condition wrapped by __builtin_unpredictable,
2140 // create metadata that specifies that the branch is unpredictable.
2141 // Don't bother if not optimizing because that metadata would not be used.
2142 auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
2143 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2144 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2145 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2146 llvm::MDBuilder MDHelper(getLLVMContext());
2147 Unpredictable = MDHelper.createUnpredictable();
2148 }
2149 }
2150
2151 // If there is a Likelihood knowledge for the cond, lower it.
2152 // Note that if not optimizing this won't emit anything.
2153 llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
2154 if (CondV != NewCondV)
2155 CondV = NewCondV;
2156 else {
2157 // Otherwise, lower profile counts. Note that we do this even at -O0.
2158 uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
2159 Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
2160 }
2161
2162 llvm::Instruction *BrInst = Builder.CreateCondBr(CondV, TrueBlock, FalseBlock,
2163 Weights, Unpredictable);
2164 addInstToNewSourceAtom(BrInst, CondV);
2165
2166 switch (HLSLControlFlowAttr) {
2167 case HLSLControlFlowHintAttr::Microsoft_branch:
2168 case HLSLControlFlowHintAttr::Microsoft_flatten: {
2169 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2170
// Encode [branch] as 1 and [flatten] as 2 in the hint metadata.
2171 llvm::ConstantInt *BranchHintConstant =
// NOTE(review): original line 2172 (the comparison against
// HLSLControlFlowAttr) is missing here.
2173 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2174 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2175 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2176
// NOTE(review): original line 2177 (the declaration of Vals) is missing here.
2178 {MDHelper.createString("hlsl.controlflow.hint"),
2179 MDHelper.createConstant(BranchHintConstant)});
2180 BrInst->setMetadata("hlsl.controlflow.hint",
2181 llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2182 break;
2183 }
2184 // This is required to avoid warnings during compilation
2185 case HLSLControlFlowHintAttr::SpellingNotCalculated:
2186 break;
2187 }
2188}
2189
/// Emit argument Idx of call E, either as an ordinary scalar or — when bit
/// Idx of ICEArguments is set — constant-folded so the generated intrinsic
/// receives a ConstantInt.
2190llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
2191 unsigned Idx,
2192 const CallExpr *E) {
2193 llvm::Value *Arg = nullptr;
2194 if ((ICEArguments & (1 << Idx)) == 0) {
2195 Arg = EmitScalarExpr(E->getArg(Idx));
2196 } else {
2197 // If this is required to be a constant, constant fold it so that we
2198 // know that the generated intrinsic gets a ConstantInt.
2199 std::optional<llvm::APSInt> Result =
// NOTE(review): original line 2200 (the initializer of Result, presumably
// E->getArg(Idx)->getIntegerConstantExpr(getContext())) is missing here.
2201 assert(Result && "Expected argument to be a constant");
2202 Arg = llvm::ConstantInt::get(getLLVMContext(), *Result);
2203 }
2204 return Arg;
2205}
2206
2207/// ErrorUnsupported - Print out an error that codegen doesn't support the
2208/// specified stmt yet.
/// Thin forwarder: the diagnostic itself is issued by CodeGenModule so that
/// all "cannot compile this yet" errors are reported uniformly.
2209void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
2210 CGM.ErrorUnsupported(S, Type);
2211}
2212
2213/// emitNonZeroVLAInit - Emit the "zero" initialization of a
2214/// variable-length array whose elements have a non-zero bit-pattern.
2215///
2216/// \param baseType the inner-most element type of the array
2217/// \param src - a char* pointing to the bit-pattern for a single
2218/// base element of the array
2219/// \param sizeInChars - the total size of the VLA, in chars
/// NOTE(review): original line 2220 (the first line of this static helper's
/// signature) was dropped by the doc extraction.
2221 Address dest, Address src,
2222 llvm::Value *sizeInChars) {
2223 CGBuilderTy &Builder = CGF.Builder;
2224
// Byte size of one base element; used both as memcpy length and loop stride.
2225 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
2226 llvm::Value *baseSizeInChars
2227 = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
2228
2229 Address begin = dest.withElementType(CGF.Int8Ty);
2230 llvm::Value *end = Builder.CreateInBoundsGEP(begin.getElementType(),
2231 begin.emitRawPointer(CGF),
2232 sizeInChars, "vla.end");
2233
2234 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
2235 llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
2236 llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
2237
2238 // Make a loop over the VLA. C99 guarantees that the VLA element
2239 // count must be nonzero.
2240 CGF.EmitBlock(loopBB);
2241
// Current element pointer: phi of the loop entry pointer and "next" below.
2242 llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
2243 cur->addIncoming(begin.emitRawPointer(CGF), originBB);
2244
// Alignment of an array element of this size within the destination.
2245 CharUnits curAlign =
2246 dest.getAlignment().alignmentOfArrayElement(baseSize);
2247
2248 // memcpy the individual element bit-pattern.
2249 Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
2250 /*volatile*/ false);
2251
2252 // Go to the next element.
2253 llvm::Value *next =
2254 Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
2255
2256 // Leave if that's the end of the VLA.
2257 llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
2258 Builder.CreateCondBr(done, contBB, loopBB);
2259 cur->addIncoming(next, loopBB);
2260
2261 CGF.EmitBlock(contBB);
2262}
2263
/// NOTE(review): original line 2264 (the first line of this overload's
/// signature — it takes an Address RecordPtr before the PFPField) was
/// dropped by the doc extraction.
2265 const PFPField &Field) {
// Compute the field's address as RecordPtr + byte offset, then delegate to
// the (RecordPtr, PtrPtr, FieldDecl) overload below.
2266 return EmitAddressOfPFPField(
2267 RecordPtr,
2268 Builder.CreateConstInBoundsByteGEP(RecordPtr.withElementType(Int8Ty),
2269 Field.Offset),
2270 Field.Field);
2271}
2272
/// NOTE(review): original line 2273 (the first line of this overload's
/// signature) was dropped by the doc extraction.
2274 Address PtrPtr,
2275 const FieldDecl *Field) {
// Discriminator for the protected field pointer: a stable SipHash of the
// field's name when PFP fields of the record are trivially copyable,
// otherwise the integer value of the record's own address.
2276 llvm::Value *Disc;
2277 if (CGM.getContext().arePFPFieldsTriviallyCopyable(Field->getParent())) {
2278 uint64_t FieldSignature =
2279 llvm::getPointerAuthStableSipHash(CGM.getPFPFieldName(Field));
2280 Disc = llvm::ConstantInt::get(CGM.Int64Ty, FieldSignature);
2281 } else
2282 Disc = Builder.CreatePtrToInt(RecordPtr.getBasePointer(), CGM.Int64Ty);
2283
// Attach the field's deactivation symbol as an operand bundle on the
// llvm.protected.field.ptr intrinsic call.
2284 llvm::GlobalValue *DS = CGM.getPFPDeactivationSymbol(Field);
2285 llvm::OperandBundleDef DSBundle("deactivation-symbol", DS);
2286 llvm::Value *Args[] = {PtrPtr.getBasePointer(), Disc, Builder.getTrue()};
2287 return Address(
2288 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::protected_field_ptr,
2289 PtrPtr.getType()),
2290 Args, DSBundle),
2291 VoidPtrTy, PtrPtr.getAlignment());
2292}
2293
2294void
/// NOTE(review): original line 2295 (the rest of the signature — presumably
/// CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty))
/// and line 2305 (the computation of the aggregate's size) were dropped by
/// the doc extraction.
2296 // Ignore empty classes in C++.
2297 if (getLangOpts().CPlusPlus)
2298 if (const auto *RD = Ty->getAsCXXRecordDecl(); RD && RD->isEmpty())
2299 return;
2300
2301 if (DestPtr.getElementType() != Int8Ty)
2302 DestPtr = DestPtr.withElementType(Int8Ty);
2303
2304 // Get size and alignment info for this aggregate.
2306
2307 llvm::Value *SizeVal;
2308 const VariableArrayType *vla;
2309
2310 // Don't bother emitting a zero-byte memset.
2311 if (size.isZero()) {
2312 // But note that getTypeInfo returns 0 for a VLA.
2313 if (const VariableArrayType *vlaType =
2314 dyn_cast_or_null<VariableArrayType>(
2315 getContext().getAsArrayType(Ty))) {
// For a VLA the byte size is NumElts * element-size, computed at runtime.
2316 auto VlaSize = getVLASize(vlaType);
2317 SizeVal = VlaSize.NumElts;
2318 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2319 if (!eltSize.isOne())
2320 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
2321 vla = vlaType;
2322 } else {
2323 return;
2324 }
2325 } else {
2326 SizeVal = CGM.getSize(size);
2327 vla = nullptr;
2328 }
2329
2330 // If the type contains a pointer to data member we can't memset it to zero.
2331 // Instead, create a null constant and copy it to the destination.
2332 // TODO: there are other patterns besides zero that we can usefully memset,
2333 // like -1, which happens to be the pattern used by member-pointers.
2334 if (!CGM.getTypes().isZeroInitializable(Ty)) {
2335 // For a VLA, emit a single element, then splat that over the VLA.
2336 if (vla) Ty = getContext().getBaseElementType(vla);
2337
2338 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
2339
// Materialize the null pattern as a private constant global to copy from.
2340 llvm::GlobalVariable *NullVariable =
2341 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2342 /*isConstant=*/true,
2343 llvm::GlobalVariable::PrivateLinkage,
2344 NullConstant, Twine());
2345 CharUnits NullAlign = DestPtr.getAlignment();
2346 NullVariable->setAlignment(NullAlign.getAsAlign());
2347 Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
2348
2349 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2350
2351 // Get and call the appropriate llvm.memcpy overload.
2352 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2353 } else {
2354 // Otherwise, just memset the whole thing to zero. This is legal
2355 // because in LLVM, all default initializers (other than the ones we just
2356 // handled above, and the case handled below) are guaranteed to have a bit
2357 // pattern of all zeros.
2358 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2359 }
2360
2361 // With the pointer field protection feature, null pointers do not have a bit
2362 // pattern of zero in memory, so we must initialize them separately.
2363 for (auto &Field : getContext().findPFPFields(Ty)) {
2364 auto addr = EmitAddressOfPFPField(DestPtr, Field);
2365 Builder.CreateStore(llvm::ConstantPointerNull::get(VoidPtrTy), addr);
2366 }
2367}
2368
/// Return a blockaddress constant for the given label, registering its block
/// as a destination of the function's indirect-goto branch.
2369llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2370 // Make sure that there is a block for the indirect goto.
2371 if (!IndirectBranch)
// NOTE(review): original line 2372 (presumably the call that lazily creates
// the indirect-goto block) is missing from this extraction.
2373
2374 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2375
2376 // Make sure the indirect branch includes all of the address-taken blocks.
2377 IndirectBranch->addDestination(BB);
2378 return llvm::BlockAddress::get(CurFn->getType(), BB);
2379}
2380
/// NOTE(review): original line 2381 (the signature, presumably
/// llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock()) is missing
/// from this extraction.
2382 // If we already made the indirect branch for indirect goto, return its block.
2383 if (IndirectBranch) return IndirectBranch->getParent();
2384
// Use a temporary builder so the current insertion point stays untouched.
2385 CGBuilderTy TmpBuilder(CGM, createBasicBlock("indirectgoto"));
2386
2387 // Create the PHI node that indirect gotos will add entries to.
2388 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2389 "indirect.goto.dest");
2390
2391 // Create the indirect branch instruction.
2392 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2393 return IndirectBranch->getParent();
2394}
2395
2396/// Computes the length of an array in elements, as well as the base
2397/// element type and a properly-typed first element pointer.
/// NOTE(review): the doc extraction dropped original lines 2406-2407 (the VLA
/// element-count lookup), 2420 (the do-while continuation), 2429 (the
/// gepIndices declaration) and 2441; numbering gaps below mark their spots.
2398llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2399 QualType &baseType,
2400 Address &addr) {
2401 const ArrayType *arrayType = origArrayType;
2402
2403 // If it's a VLA, we have to load the stored size. Note that
2404 // this is the size of the VLA in bytes, not its size in elements.
2405 llvm::Value *numVLAElements = nullptr;
2408
2409 // Walk into all VLAs. This doesn't require changes to addr,
2410 // which has type T* where T is the first non-VLA element type.
2411 do {
2412 QualType elementType = arrayType->getElementType();
2413 arrayType = getContext().getAsArrayType(elementType);
2414
2415 // If we only have VLA components, 'addr' requires no adjustment.
2416 if (!arrayType) {
2417 baseType = elementType;
2418 return numVLAElements;
2419 }
2421
2422 // We get out here only if we find a constant array type
2423 // inside the VLA.
2424 }
2425
2426 // We have some number of constant-length arrays, so addr should
2427 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
2428 // down to the first element of addr.
2430
2431 // GEP down to the array type.
2432 llvm::ConstantInt *zero = Builder.getInt32(0);
2433 gepIndices.push_back(zero);
2434
// Running product of the constant-length array dimensions.
2435 uint64_t countFromCLAs = 1;
2436 QualType eltType;
2437
2438 llvm::ArrayType *llvmArrayType =
2439 dyn_cast<llvm::ArrayType>(addr.getElementType());
2440 while (llvmArrayType) {
2442 assert(cast<ConstantArrayType>(arrayType)->getZExtSize() ==
2443 llvmArrayType->getNumElements());
2444
2445 gepIndices.push_back(zero);
2446 countFromCLAs *= llvmArrayType->getNumElements();
2447 eltType = arrayType->getElementType();
2448
// Descend both the LLVM and Clang type hierarchies in lock-step.
2449 llvmArrayType =
2450 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2451 arrayType = getContext().getAsArrayType(arrayType->getElementType());
2452 assert((!llvmArrayType || arrayType) &&
2453 "LLVM and Clang types are out-of-synch");
2454 }
2455
2456 if (arrayType) {
2457 // From this point onwards, the Clang array type has been emitted
2458 // as some other type (probably a packed struct). Compute the array
2459 // size, and just emit the 'begin' expression as a bitcast.
2460 while (arrayType) {
2461 countFromCLAs *= cast<ConstantArrayType>(arrayType)->getZExtSize();
2462 eltType = arrayType->getElementType();
2463 arrayType = getContext().getAsArrayType(eltType);
2464 }
2465
2466 llvm::Type *baseType = ConvertType(eltType);
2467 addr = addr.withElementType(baseType);
2468 } else {
2469 // Create the actual GEP.
2470 addr = Address(Builder.CreateInBoundsGEP(addr.getElementType(),
2471 addr.emitRawPointer(*this),
2472 gepIndices, "array.begin"),
2473 ConvertTypeForMem(eltType), addr.getAlignment());
2474 }
2475
2476 baseType = eltType;
2477
2478 llvm::Value *numElements
2479 = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2480
2481 // If we had any VLA dimensions, factor them in.
2482 if (numVLAElements)
2483 numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2484
2485 return numElements;
2486}
2487
/// NOTE(review): original lines 2488-2489 (this overload's signature and the
/// getAsVariableArrayType lookup that defines `vla`) are missing from this
/// extraction.
2490 assert(vla && "type was not a variable array type!");
2491 return getVLASize(vla);
2492}
2493
/// NOTE(review): original lines 2494-2495 (this overload's signature, taking
/// a const VariableArrayType*) are missing from this extraction.
2496 // The number of elements so far; always size_t.
2497 llvm::Value *numElements = nullptr;
2498
// Walk every nested VLA dimension, multiplying the cached per-dimension
// sizes into a single element count.
2499 QualType elementType;
2500 do {
2501 elementType = type->getElementType();
2502 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2503 assert(vlaSize && "no size for VLA!");
2504 assert(vlaSize->getType() == SizeTy);
2505
2506 if (!numElements) {
2507 numElements = vlaSize;
2508 } else {
2509 // It's undefined behavior if this wraps around, so mark it that way.
2510 // FIXME: Teach -fsanitize=undefined to trap this.
2511 numElements = Builder.CreateNUWMul(numElements, vlaSize);
2512 }
2513 } while ((type = getContext().getAsVariableArrayType(elementType)));
2514
2515 return { numElements, elementType };
2516}
2517
/// NOTE(review): original lines 2518-2520 (this overload's signature and the
/// lookup that defines `vla`) are missing from this extraction.
2521 assert(vla && "type was not a variable array type!");
2522 return getVLAElements1D(vla);
2523}
2524
/// NOTE(review): original lines 2525-2526 (this overload's signature, taking
/// a const VariableArrayType *Vla) are missing from this extraction.
// Single-dimension variant: return the cached size of just this VLA level.
2527 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2528 assert(VlaSize && "no size for VLA!");
2529 assert(VlaSize->getType() == SizeTy);
2530 return { VlaSize, Vla->getElementType() };
2531}
2532
/// NOTE(review): the doc extraction dropped original lines 2533 (the function
/// signature, presumably void CodeGenFunction::EmitVariablyModifiedType(
/// QualType type)), 2537 and 2608 (the cast defining `vat`); numbering gaps
/// below mark their positions.
2534 assert(type->isVariablyModifiedType() &&
2535 "Must pass variably modified type to EmitVLASizes!");
2536
2538
2539 // We're going to walk down into the type and look for VLA
2540 // expressions.
2541 do {
2542 assert(type->isVariablyModifiedType());
2543
2544 const Type *ty = type.getTypePtr();
2545 switch (ty->getTypeClass()) {
2546
2547#define TYPE(Class, Base)
2548#define ABSTRACT_TYPE(Class, Base)
2549#define NON_CANONICAL_TYPE(Class, Base)
2550#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2551#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2552#include "clang/AST/TypeNodes.inc"
2553 llvm_unreachable("unexpected dependent type!");
2554
2555 // These types are never variably-modified.
2556 case Type::Builtin:
2557 case Type::Complex:
2558 case Type::Vector:
2559 case Type::ExtVector:
2560 case Type::ConstantMatrix:
2561 case Type::Record:
2562 case Type::Enum:
2563 case Type::Using:
2564 case Type::TemplateSpecialization:
2565 case Type::ObjCTypeParam:
2566 case Type::ObjCObject:
2567 case Type::ObjCInterface:
2568 case Type::ObjCObjectPointer:
2569 case Type::BitInt:
2570 case Type::HLSLInlineSpirv:
2571 case Type::PredefinedSugar:
2572 llvm_unreachable("type class is never variably-modified!");
2573
2574 case Type::Adjusted:
2575 type = cast<AdjustedType>(ty)->getAdjustedType();
2576 break;
2577
2578 case Type::Decayed:
2579 type = cast<DecayedType>(ty)->getPointeeType();
2580 break;
2581
2582 case Type::Pointer:
2583 type = cast<PointerType>(ty)->getPointeeType();
2584 break;
2585
2586 case Type::BlockPointer:
2587 type = cast<BlockPointerType>(ty)->getPointeeType();
2588 break;
2589
2590 case Type::LValueReference:
2591 case Type::RValueReference:
2592 type = cast<ReferenceType>(ty)->getPointeeType();
2593 break;
2594
2595 case Type::MemberPointer:
2596 type = cast<MemberPointerType>(ty)->getPointeeType();
2597 break;
2598
2599 case Type::ArrayParameter:
2600 case Type::ConstantArray:
2601 case Type::IncompleteArray:
2602 // Losing element qualification here is fine.
2603 type = cast<ArrayType>(ty)->getElementType();
2604 break;
2605
2606 case Type::VariableArray: {
2607 // Losing element qualification here is fine.
2609
2610 // Unknown size indication requires no size computation.
2611 // Otherwise, evaluate and record it.
2612 if (const Expr *sizeExpr = vat->getSizeExpr()) {
2613 // It's possible that we might have emitted this already,
2614 // e.g. with a typedef and a pointer to it.
2615 llvm::Value *&entry = VLASizeMap[sizeExpr];
2616 if (!entry) {
2617 llvm::Value *size = EmitScalarExpr(sizeExpr);
2618
2619 // C11 6.7.6.2p5:
2620 // If the size is an expression that is not an integer constant
2621 // expression [...] each time it is evaluated it shall have a value
2622 // greater than zero.
2623 if (SanOpts.has(SanitizerKind::VLABound)) {
2624 auto CheckOrdinal = SanitizerKind::SO_VLABound;
2625 auto CheckHandler = SanitizerHandler::VLABoundNotPositive;
2626 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2627 llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2628 clang::QualType SEType = sizeExpr->getType();
// Pick the signed or unsigned compare to match the size expression's type.
2629 llvm::Value *CheckCondition =
2630 SEType->isSignedIntegerType()
2631 ? Builder.CreateICmpSGT(size, Zero)
2632 : Builder.CreateICmpUGT(size, Zero);
2633 llvm::Constant *StaticArgs[] = {
2634 EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2635 EmitCheckTypeDescriptor(SEType)};
2636 EmitCheck(std::make_pair(CheckCondition, CheckOrdinal),
2637 CheckHandler, StaticArgs, size);
2638 }
2639
2640 // Always zexting here would be wrong if it weren't
2641 // undefined behavior to have a negative bound.
2642 // FIXME: What about when size's type is larger than size_t?
2643 entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2644 }
2645 }
2646 type = vat->getElementType();
2647 break;
2648 }
2649
2650 case Type::FunctionProto:
2651 case Type::FunctionNoProto:
2652 type = cast<FunctionType>(ty)->getReturnType();
2653 break;
2654
2655 case Type::Paren:
2656 case Type::TypeOf:
2657 case Type::UnaryTransform:
2658 case Type::Attributed:
2659 case Type::BTFTagAttributed:
2660 case Type::OverflowBehavior:
2661 case Type::HLSLAttributedResource:
2662 case Type::SubstTemplateTypeParm:
2663 case Type::MacroQualified:
2664 case Type::CountAttributed:
2665 // Keep walking after single level desugaring.
2666 type = type.getSingleStepDesugaredType(getContext());
2667 break;
2668
2669 case Type::Typedef:
2670 case Type::Decltype:
2671 case Type::Auto:
2672 case Type::DeducedTemplateSpecialization:
2673 case Type::PackIndexing:
2674 // Stop walking: nothing to do.
2675 return;
2676
2677 case Type::TypeOfExpr:
2678 // Stop walking: emit typeof expression.
2679 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2680 return;
2681
2682 case Type::Atomic:
2683 type = cast<AtomicType>(ty)->getValueType();
2684 break;
2685
2686 case Type::Pipe:
2687 type = cast<PipeType>(ty)->getElementType();
2688 break;
2689 }
2690 } while (type->isVariablyModifiedType());
2691}
2692
// NOTE(review): the defining signature line was stripped from this excerpt;
// this is the body of CodeGenFunction::EmitVAListRef. It returns the address
// of a va_list expression: when the target's builtin va_list is an array
// type the expression decays to a pointer, so the pointer value itself is
// emitted; otherwise the va_list is an lvalue and its address is used.
2694 if (getContext().getBuiltinVaListType()->isArrayType())
2695 return EmitPointerWithAlignment(E);
2696 return EmitLValue(E).getAddress();
2697}
2698
2702
// NOTE(review): signature line stripped from this excerpt; this is the body
// of CodeGenFunction::EmitDeclRefExprDbgValue. It forwards a constant
// initializer of a referenced declaration to the debug-info emitter when a
// debug-info writer exists and the configured level permits it.
2704 const APValue &Init) {
2705 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2706 if (CGDebugInfo *Dbg = getDebugInfo())
2707 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2708 Dbg->EmitGlobalVariable(E->getDecl(), Init);
2709}
2710
// NOTE(review): signature line stripped from this excerpt; this is the body
// of CodeGenFunction::protectFromPeepholes. It shields a scalar rvalue that
// is a zext from being folded by IR-gen's trunc(zext) peephole, by inserting
// a same-type bitcast that hides the zext until unprotectFromPeepholes runs.
2713 // At the moment, the only aggressive peephole we do in IR gen
2714 // is trunc(zext) folding, but if we add more, we can easily
2715 // extend this protection.
2716
// Non-scalar values and non-zext scalars need no protection.
2717 if (!rvalue.isScalar()) return PeepholeProtection();
2718 llvm::Value *value = rvalue.getScalarVal();
2719 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2720
2721 // Just make an extra bitcast.
2722 assert(HaveInsertPoint());
2723 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2724 Builder.GetInsertBlock());
2725
2726 PeepholeProtection protection;
2727 protection.Inst = inst;
2728 return protection;
2729}
2730
// NOTE(review): signature line stripped; this is the body of
// CodeGenFunction::unprotectFromPeepholes — remove the protective bitcast
// inserted by protectFromPeepholes (no-op if nothing was protected).
2732 if (!protection.Inst) return;
2733
2734 // In theory, we could try to duplicate the peepholes now, but whatever.
2735 protection.Inst->eraseFromParent();
2736}
2737
// NOTE(review): the first signature line was stripped from this excerpt;
// this is CodeGenFunction::emitAlignmentAssumption. It emits an
// llvm.assume-based alignment assumption for PtrValue and, when the
// alignment sanitizer is enabled, a runtime check that the assumption
// actually holds.
2739 QualType Ty, SourceLocation Loc,
2740 SourceLocation AssumptionLoc,
2741 llvm::Value *Alignment,
2742 llvm::Value *OffsetValue) {
// Normalize alignment and offset to the pointer-sized integer type.
2743 if (Alignment->getType() != IntPtrTy)
2744 Alignment =
2745 Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2746 if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2747 OffsetValue =
2748 Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2749 llvm::Value *TheCheck = nullptr;
2750 if (SanOpts.has(SanitizerKind::Alignment)) {
2751 llvm::Value *PtrIntValue =
2752 Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2753
// The assumption is about (ptr - offset); only subtract when the offset is
// not a literal zero.
2754 if (OffsetValue) {
2755 bool IsOffsetZero = false;
2756 if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2757 IsOffsetZero = CI->isZero();
2758
2759 if (!IsOffsetZero)
2760 PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2761 }
2762
// Check: ((ptr - offset) & (alignment - 1)) == 0.
2763 llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2764 llvm::Value *Mask =
2765 Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2766 llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2767 TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2768 }
2769 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2770 CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2771
2772 if (!SanOpts.has(SanitizerKind::Alignment))
2773 return;
2774 emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2775 OffsetValue, TheCheck, Assumption);
2776}
2777
// NOTE(review): first signature line stripped; this is the Expr-based
// overload of CodeGenFunction::emitAlignmentAssumption. It derives the type
// and source location from the expression and forwards to the main overload.
2779 const Expr *E,
2780 SourceLocation AssumptionLoc,
2781 llvm::Value *Alignment,
2782 llvm::Value *OffsetValue) {
2783 QualType Ty = E->getType();
2784 SourceLocation Loc = E->getExprLoc();
2785
2786 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2787 OffsetValue);
2788}
2789
2790llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2791 llvm::Value *AnnotatedVal,
2792 StringRef AnnotationStr,
2793 SourceLocation Location,
2794 const AnnotateAttr *Attr) {
2796 AnnotatedVal,
2797 CGM.EmitAnnotationString(AnnotationStr),
2798 CGM.EmitAnnotationUnit(Location),
2799 CGM.EmitAnnotationLineNo(Location),
2800 };
2801 if (Attr)
2802 Args.push_back(CGM.EmitAnnotationArgs(Attr));
2803 return Builder.CreateCall(AnnotationFn, Args);
2804}
2805
2806void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2807 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2808 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2809 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2810 {V->getType(), CGM.ConstGlobalsPtrTy}),
2811 V, I->getAnnotation(), D->getLocation(), I);
2812}
2813
// NOTE(review): first signature line stripped; this is
// CodeGenFunction::EmitFieldAnnotations. It wraps the field address in a
// chain of llvm.ptr.annotation calls, one per annotate attribute, and
// returns an Address carrying the (possibly replaced) pointer.
2815 Address Addr) {
2816 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2817 llvm::Value *V = Addr.emitRawPointer(*this);
2818 llvm::Type *VTy = V->getType();
// The intrinsic is declared over a generic pointer in the field's address
// space; casts below adapt between that and the field pointer type.
2819 auto *PTy = dyn_cast<llvm::PointerType>(VTy);
2820 unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2821 llvm::PointerType *IntrinTy =
2822 llvm::PointerType::get(CGM.getLLVMContext(), AS);
2823 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2824 {IntrinTy, CGM.ConstGlobalsPtrTy});
2825
2826 for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2827 // FIXME Always emit the cast inst so we can differentiate between
2828 // annotation on the first field of a struct and annotation on the struct
2829 // itself.
2830 if (VTy != IntrinTy)
2831 V = Builder.CreateBitCast(V, IntrinTy);
2832 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2833 V = Builder.CreateBitCast(V, VTy);
2834 }
2835
2836 return Address(V, Addr.getElementType(), Addr.getAlignment());
2837}
2838
2840
// NOTE(review): signature line stripped; this is the SanitizerScope
// constructor — it flags the CGF as being inside sanitizer-emitted code so
// InsertHelper marks new instructions as not-to-be-sanitized. Scopes must
// not nest (asserted below).
2842 : CGF(CGF) {
2843 assert(!CGF->IsSanitizerScope);
2844 CGF->IsSanitizerScope = true;
2845}
2846
// NOTE(review): signature line stripped; the SanitizerScope destructor
// simply clears the flag set by the constructor.
2848 CGF->IsSanitizerScope = false;
2849}
2850
2851void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2852 const llvm::Twine &Name,
2853 llvm::BasicBlock::iterator InsertPt) const {
2854 LoopStack.InsertHelper(I);
2855 if (IsSanitizerScope)
2856 I->setNoSanitizeMetadata();
2857}
2858
// NOTE(review): first signature line stripped; this is the CGBuilderInserter
// InsertHelper — it performs the default IRBuilder insertion and then gives
// the owning CodeGenFunction (if any) a chance to decorate the instruction.
2860 llvm::Instruction *I, const llvm::Twine &Name,
2861 llvm::BasicBlock::iterator InsertPt) const {
2862 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, InsertPt);
2863 if (CGF)
2864 CGF->InsertHelper(I, Name, InsertPt);
2865}
2866
2867// Emits an error if we don't have a valid set of target features for the
2868// called function.
// NOTE(review): first signature line stripped; this is the CallExpr overload
// of CodeGenFunction::checkTargetFeatures.
2870 const FunctionDecl *TargetDecl) {
2871 // SemaChecking cannot handle below x86 builtins because they have different
2872 // parameter ranges with different TargetAttribute of caller.
2873 if (CGM.getContext().getTargetInfo().getTriple().isX86()) {
2874 unsigned BuiltinID = TargetDecl->getBuiltinID();
2875 if (BuiltinID == X86::BI__builtin_ia32_cmpps ||
2876 BuiltinID == X86::BI__builtin_ia32_cmpss ||
2877 BuiltinID == X86::BI__builtin_ia32_cmppd ||
2878 BuiltinID == X86::BI__builtin_ia32_cmpsd) {
2879 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2880 llvm::StringMap<bool> TargetFetureMap;
2881 CGM.getContext().getFunctionFeatureMap(TargetFetureMap, FD);
// A comparison predicate (argument 2) above 7 selects an encoding that
// requires AVX on the caller.
2882 llvm::APSInt Result =
2883 *(E->getArg(2)->getIntegerConstantExpr(CGM.getContext()));
2884 if (Result.getSExtValue() > 7 && !TargetFetureMap.lookup("avx"))
2885 CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
2886 << TargetDecl->getDeclName() << "avx";
2887 }
2888 }
// Delegate the general feature checking to the location-based overload.
2889 return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2890}
2891
2892// Emits an error if we don't have a valid set of target features for the
2893// called function.
// NOTE(review): first signature line stripped; this is the
// SourceLocation-based overload of CodeGenFunction::checkTargetFeatures.
// Two interior lines were also stripped from this excerpt (the builtin
// required-feature evaluation call and the ParsedAttr declaration) —
// see the inline notes below.
2895 const FunctionDecl *TargetDecl) {
2896 // Early exit if this is an indirect call.
2897 if (!TargetDecl)
2898 return;
2899
2900 // Get the current enclosing function if it exists. If it doesn't
2901 // we can't check the target features anyhow.
2902 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2903 if (!FD)
2904 return;
2905
2906 bool IsAlwaysInline = TargetDecl->hasAttr<AlwaysInlineAttr>();
2907 bool IsFlatten = FD && FD->hasAttr<FlattenAttr>();
2908
2909 // Grab the required features for the call. For a builtin this is listed in
2910 // the td file with the default cpu, for an always_inline function this is any
2911 // listed cpu and any listed features.
2912 unsigned BuiltinID = TargetDecl->getBuiltinID();
2913 std::string MissingFeature;
2914 llvm::StringMap<bool> CallerFeatureMap;
2915 CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2916 // When compiling in HipStdPar mode we have to be conservative in rejecting
2917 // target specific features in the FE, and defer the possible error to the
2918 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
2919 // referenced by an accelerator executable function, we emit an error.
2920 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2921 if (BuiltinID) {
2922 StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
// NOTE(review): the condition opening this if-statement (orig. line 2923,
// the required-target-feature evaluation) was stripped from the excerpt.
2924 FeatureList, CallerFeatureMap) && !IsHipStdPar) {
2925 CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2926 << TargetDecl->getDeclName()
2927 << FeatureList;
2928 }
2929 } else if (!TargetDecl->isMultiVersion() &&
2930 TargetDecl->hasAttr<TargetAttr>()) {
2931 // Get the required features for the callee.
2932
2933 const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
// NOTE(review): the declaration of ParsedAttr (orig. line 2934) was
// stripped from the excerpt; this is its initializer.
2935 CGM.getContext().filterFunctionTargetAttrs(TD);
2936
2937 SmallVector<StringRef, 1> ReqFeatures;
2938 llvm::StringMap<bool> CalleeFeatureMap;
2939 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2940
// Features explicitly listed as "+feat" on the callee's target attribute
// are required (when the callee's feature map also enables them).
2941 for (const auto &F : ParsedAttr.Features) {
2942 if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2943 ReqFeatures.push_back(StringRef(F).substr(1));
2944 }
2945
2946 for (const auto &F : CalleeFeatureMap) {
2947 // Only positive features are "required".
2948 if (F.getValue())
2949 ReqFeatures.push_back(F.getKey());
2950 }
// Report the first required feature the caller does not provide.
2951 if (!llvm::all_of(ReqFeatures,
2952 [&](StringRef Feature) {
2953 if (!CallerFeatureMap.lookup(Feature)) {
2954 MissingFeature = Feature.str();
2955 return false;
2956 }
2957 return true;
2958 }) &&
2959 !IsHipStdPar) {
2960 if (IsAlwaysInline)
2961 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2962 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2963 else if (IsFlatten)
2964 CGM.getDiags().Report(Loc, diag::err_flatten_function_needs_feature)
2965 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2966 }
2967
2968 } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
2969 llvm::StringMap<bool> CalleeFeatureMap;
2970 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2971
// The caller narrows its features with target(): every feature the callee
// enables must also be enabled in the caller's map.
2972 for (const auto &F : CalleeFeatureMap) {
2973 if (F.getValue() &&
2974 (!CallerFeatureMap.lookup(F.getKey()) ||
2975 !CallerFeatureMap.find(F.getKey())->getValue()) &&
2976 !IsHipStdPar) {
2977 if (IsAlwaysInline)
2978 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2979 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2980 else if (IsFlatten)
2981 CGM.getDiags().Report(Loc, diag::err_flatten_function_needs_feature)
2982 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2983 }
2984 }
2985 }
2986}
2987
2988void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2989 if (!CGM.getCodeGenOpts().SanitizeStats)
2990 return;
2991
2992 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2993 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2994 CGM.getSanStats().create(IRB, SSK);
2995}
2996
// NOTE(review): first signature line stripped; this is
// CodeGenFunction::EmitKCFIOperandBundle. One interior line (orig. 3000,
// the declaration initialized from getAbstractInfo's prototype) was also
// stripped — see the inline note.
2998 const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2999 const CGCalleeInfo &CI = Callee.getAbstractInfo();
// NOTE(review): the declaration of FP (orig. line 3000) was stripped from
// this excerpt; FP is the callee's function prototype, null for callees
// without one.
3001 if (!FP)
3002 return;
3003
// A CFI salt on the prototype participates in the KCFI type id.
3004 StringRef Salt;
3005 if (const auto &Info = FP->getExtraAttributeInfo())
3006 Salt = Info.CFISalt;
3007
3008 Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar(), Salt));
3009}
3010
3011llvm::Value *
3012CodeGenFunction::FormAArch64ResolverCondition(const FMVResolverOption &RO) {
3013 return RO.Features.empty() ? nullptr : EmitAArch64CpuSupports(RO.Features);
3014}
3015
3016llvm::Value *
3017CodeGenFunction::FormX86ResolverCondition(const FMVResolverOption &RO) {
3018 llvm::Value *Condition = nullptr;
3019
3020 if (RO.Architecture) {
3021 StringRef Arch = *RO.Architecture;
3022 // If arch= specifies an x86-64 micro-architecture level, test the feature
3023 // with __builtin_cpu_supports, otherwise use __builtin_cpu_is.
3024 if (Arch.starts_with("x86-64"))
3025 Condition = EmitX86CpuSupports({Arch});
3026 else
3027 Condition = EmitX86CpuIs(Arch);
3028 }
3029
3030 if (!RO.Features.empty()) {
3031 llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Features);
3032 Condition =
3033 Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
3034 }
3035 return Condition;
3036}
3037
// NOTE(review): first signature line stripped; this is the static helper
// CreateMultiVersionResolverReturn. With IFUNC support the resolver returns
// the selected function pointer; without it the resolver musttail-calls the
// selected version, forwarding its own arguments.
3039 llvm::Function *Resolver,
3040 CGBuilderTy &Builder,
3041 llvm::Function *FuncToReturn,
3042 bool SupportsIFunc) {
3043 if (SupportsIFunc) {
3044 Builder.CreateRet(FuncToReturn);
3045 return;
3046 }
3047
// NOTE(review): the declaration of Args (orig. line 3048) was stripped
// from this excerpt; it collects the resolver's own arguments to forward.
3049 llvm::make_pointer_range(Resolver->args()));
3050
3051 llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
3052 Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
3053
3054 if (Resolver->getReturnType()->isVoidTy())
3055 Builder.CreateRetVoid();
3056 else
3057 Builder.CreateRet(Result);
3058}
3059
// NOTE(review): first signature line stripped; this is
// CodeGenFunction::EmitMultiVersionResolver — dispatch to the per-target
// resolver emitter based on the target triple.
3061 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3062
3063 llvm::Triple::ArchType ArchType =
3064 getContext().getTargetInfo().getTriple().getArch();
3065
3066 switch (ArchType) {
3067 case llvm::Triple::x86:
3068 case llvm::Triple::x86_64:
3069 EmitX86MultiVersionResolver(Resolver, Options);
3070 return;
3071 case llvm::Triple::aarch64:
3072 EmitAArch64MultiVersionResolver(Resolver, Options);
3073 return;
3074 case llvm::Triple::riscv32:
3075 case llvm::Triple::riscv64:
3076 case llvm::Triple::riscv32be:
3077 case llvm::Triple::riscv64be:
3078 EmitRISCVMultiVersionResolver(Resolver, Options);
3079 return;
3080 case llvm::Triple::ppc:
3081 case llvm::Triple::ppc64:
// PowerPC multiversioning is only supported on AIX; other PPC targets fall
// through to the unsupported-target assertion.
3082 if (getContext().getTargetInfo().getTriple().isOSAIX()) {
3083 EmitPPCAIXMultiVersionResolver(Resolver, Options);
3084 return;
3085 }
3086 [[fallthrough]];
3087 default:
3088 assert(false &&
3089 "Only implemented for x86, AArch64, RISC-V, and PowerPC AIX");
3090 }
3091}
3092
3093/**
3094 * define internal ptr @foo.resolver() {
3095 * entry:
3096 * %is_version_1 = __builtin_cpu_supports(version_1)
3097 * br i1 %1, label %if.version_1, label %if.else_2
3098 *
3099 * if.version_1:
3100 * ret ptr @foo.version_1
3101 *
3102 * if.else_2:
3103 * %is_version_2 = __builtin_cpu_supports(version_2)
3104 * ...
3105 * if.else: ; preds = %entry
3106 * ret ptr @foo.default
3107 * }
3108 */
// NOTE(review): first signature line stripped; this is
// CodeGenFunction::EmitPPCAIXMultiVersionResolver. It emits an if/else
// cascade of __builtin_cpu_supports checks, one per non-default option,
// ending in the mandatory default return (see the comment block above).
3110 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3111
3112 // entry:
3113 llvm::BasicBlock *CurBlock = createBasicBlock("entry", Resolver);
3114
// NOTE(review): one line (orig. 3115) was stripped from this excerpt here.
3116 for (const FMVResolverOption &RO : Options) {
3117 Builder.SetInsertPoint(CurBlock);
3118 // The 'default' or 'generic' case.
3119 if (!RO.Architecture && RO.Features.empty()) {
3120 // if.else:
3121 // ret ptr @foo.default
3122 assert(&RO == Options.end() - 1 &&
3123 "Default or Generic case must be last");
3124 Builder.CreateRet(RO.Function);
3125 return;
3126 }
3127 // if.else_n:
3128 // %is_version_n = __builtin_cpu_supports(version_n)
3129 // br i1 %is_version_n, label %if.version_n, label %if.else_n+1
3130 //
3131 // if.version_n:
3132 // ret ptr @foo_version_n
3133 assert(RO.Features.size() == 1 &&
3134 "for now one feature requirement per version");
3135
// Map the cpu= name onto the corresponding PPC architecture-level feature
// bit understood by __builtin_cpu_supports.
3136 assert(RO.Features[0].starts_with("cpu="));
3137 StringRef CPU = RO.Features[0].split("=").second.trim();
3138 StringRef Feature = llvm::StringSwitch<StringRef>(CPU)
3139 .Case("pwr7", "arch_2_06")
3140 .Case("pwr8", "arch_2_07")
3141 .Case("pwr9", "arch_3_00")
3142 .Case("pwr10", "arch_3_1")
3143 .Case("pwr11", "arch_3_1")
3144 .Default("error")
3145
3146 llvm::Value *Condition = EmitPPCBuiltinCpu(
3147 Builtin::BI__builtin_cpu_supports, Builder.getInt1Ty(), Feature);
3148
3149 llvm::BasicBlock *ThenBlock = createBasicBlock("if.version", Resolver);
3150 CurBlock = createBasicBlock("if.else", Resolver);
3151 Builder.CreateCondBr(Condition, ThenBlock, CurBlock);
3152
3153 Builder.SetInsertPoint(ThenBlock);
3154 Builder.CreateRet(RO.Function);
3155 }
3156
3157 llvm_unreachable("Default case missing");
3158}
3159
// NOTE(review): first signature line stripped; this is
// CodeGenFunction::EmitRISCVMultiVersionResolver. It emits a chain of
// __riscv_feature_bits-based checks (one per non-default option), then the
// default version, or a trap if no default exists.
3161 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3162
3163 if (getContext().getTargetInfo().getTriple().getOS() !=
3164 llvm::Triple::OSType::Linux) {
3165 CGM.getDiags().Report(diag::err_os_unsupport_riscv_fmv);
3166 return;
3167 }
3168
3169 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
3170 Builder.SetInsertPoint(CurBlock);
// NOTE(review): one line (orig. 3171) was stripped from this excerpt here —
// presumably the runtime feature-bits initialization call; confirm against
// upstream.
3172
3173 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3174 bool HasDefault = false;
3175 unsigned DefaultIndex = 0;
3176
3177 // Check each candidate function.
3178 for (unsigned Index = 0; Index < Options.size(); Index++) {
3179
// Remember the default version; it is emitted after all guarded versions.
3180 if (Options[Index].Features.empty()) {
3181 HasDefault = true;
3182 DefaultIndex = Index;
3183 continue;
3184 }
3185
3186 Builder.SetInsertPoint(CurBlock);
3187
3188 // FeaturesCondition: The bitmask of the required extension has been
3189 // enabled by the runtime object.
3190 // (__riscv_feature_bits.features[i] & REQUIRED_BITMASK) ==
3191 // REQUIRED_BITMASK
3192 //
3193 // When condition is met, return this version of the function.
3194 // Otherwise, try the next version.
3195 //
3196 // if (FeaturesConditionVersion1)
3197 // return Version1;
3198 // else if (FeaturesConditionVersion2)
3199 // return Version2;
3200 // else if (FeaturesConditionVersion3)
3201 // return Version3;
3202 // ...
3203 // else
3204 // return DefaultVersion;
3205
3206 // TODO: Add a condition to check the length before accessing elements.
3207 // Without checking the length first, we may access an incorrect memory
3208 // address when using different versions.
3209 llvm::SmallVector<StringRef, 8> CurrTargetAttrFeats;
3210 llvm::SmallVector<std::string, 8> TargetAttrFeats;
3211
3212 for (StringRef Feat : Options[Index].Features) {
// NOTE(review): the initializer of FeatStr (orig. line 3214, the call that
// parses the feature string) was stripped from this excerpt.
3213 std::vector<std::string> FeatStr =
3215
3216 assert(FeatStr.size() == 1 && "Feature string not delimited");
3217
// Only positive ("+feat") requirements participate in the runtime check.
3218 std::string &CurrFeat = FeatStr.front();
3219 if (CurrFeat[0] == '+')
3220 TargetAttrFeats.push_back(CurrFeat.substr(1));
3221 }
3222
3223 if (TargetAttrFeats.empty())
3224 continue;
3225
3226 for (std::string &Feat : TargetAttrFeats)
3227 CurrTargetAttrFeats.push_back(Feat);
3228
3229 Builder.SetInsertPoint(CurBlock);
3230 llvm::Value *FeatsCondition = EmitRISCVCpuSupports(CurrTargetAttrFeats);
3231
3232 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3233 CGBuilderTy RetBuilder(CGM, RetBlock);
3234 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder,
3235 Options[Index].Function, SupportsIFunc);
3236 llvm::BasicBlock *ElseBlock = createBasicBlock("resolver_else", Resolver);
3237
3238 Builder.SetInsertPoint(CurBlock);
3239 Builder.CreateCondBr(FeatsCondition, RetBlock, ElseBlock);
3240
3241 CurBlock = ElseBlock;
3242 }
3243
3244 // Finally, emit the default one.
3245 if (HasDefault) {
3246 Builder.SetInsertPoint(CurBlock);
// NOTE(review): the call line opening this return (orig. 3247,
// CreateMultiVersionResolverReturn) was stripped from this excerpt.
3248 CGM, Resolver, Builder, Options[DefaultIndex].Function, SupportsIFunc);
3249 return;
3250 }
3251
3252 // If no generic/default, emit an unreachable.
3253 Builder.SetInsertPoint(CurBlock);
3254 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3255 TrapCall->setDoesNotReturn();
3256 TrapCall->setDoesNotThrow();
3257 Builder.CreateUnreachable();
3258 Builder.ClearInsertionPoint();
3259}
3260
// NOTE(review): first signature line stripped; this is
// CodeGenFunction::EmitAArch64MultiVersionResolver. It emits a chain of
// CPU-feature checks (initializing the AArch64 CPU-features runtime state
// once, lazily) ending in the default return or a trap.
3262 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3263 assert(!Options.empty() && "No multiversion resolver options found");
3264 assert(Options.back().Features.size() == 0 && "Default case must be last");
3265 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3266 assert(SupportsIFunc &&
3267 "Multiversion resolver requires target IFUNC support");
3268 bool AArch64CpuInitialized = false;
3269 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
3270
3271 for (const FMVResolverOption &RO : Options) {
3272 Builder.SetInsertPoint(CurBlock);
3273 llvm::Value *Condition = FormAArch64ResolverCondition(RO);
3274
3275 // The 'default' or 'all features enabled' case.
3276 if (!Condition) {
3277 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
3278 SupportsIFunc);
3279 return;
3280 }
3281
// The CPU-features init call is prepended to the entry block the first time
// a guarded version is seen, so an all-default resolver never emits it.
3282 if (!AArch64CpuInitialized) {
3283 Builder.SetInsertPoint(CurBlock, CurBlock->begin());
3284 EmitAArch64CpuInit();
3285 AArch64CpuInitialized = true;
3286 Builder.SetInsertPoint(CurBlock);
3287 }
3288
3289 // Skip unreachable versions.
3290 if (RO.Function == nullptr)
3291 continue;
3292
3293 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3294 CGBuilderTy RetBuilder(CGM, RetBlock);
3295 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
3296 SupportsIFunc);
3297 CurBlock = createBasicBlock("resolver_else", Resolver);
3298 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
3299 }
3300
3301 // If no default, emit an unreachable.
3302 Builder.SetInsertPoint(CurBlock);
3303 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3304 TrapCall->setDoesNotReturn();
3305 TrapCall->setDoesNotThrow();
3306 Builder.CreateUnreachable();
3307 Builder.ClearInsertionPoint();
3308}
3309
// NOTE(review): first signature line stripped; this is
// CodeGenFunction::EmitX86MultiVersionResolver. It initializes the x86 CPU
// model data, then emits a chain of cpu_is/cpu_supports checks ending in the
// default return or a trap.
3311 llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3312
3313 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3314
3315 // Main function's basic block.
3316 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
3317 Builder.SetInsertPoint(CurBlock);
3318 EmitX86CpuInit();
3319
3320 for (const FMVResolverOption &RO : Options) {
3321 Builder.SetInsertPoint(CurBlock);
3322 llvm::Value *Condition = FormX86ResolverCondition(RO);
3323
3324 // The 'default' or 'generic' case.
3325 if (!Condition) {
3326 assert(&RO == Options.end() - 1 &&
3327 "Default or Generic case must be last");
3328 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
3329 SupportsIFunc);
3330 return;
3331 }
3332
3333 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3334 CGBuilderTy RetBuilder(CGM, RetBlock);
3335 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
3336 SupportsIFunc);
3337 CurBlock = createBasicBlock("resolver_else", Resolver);
3338 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
3339 }
3340
3341 // If no generic/default, emit an unreachable.
3342 Builder.SetInsertPoint(CurBlock);
3343 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3344 TrapCall->setDoesNotReturn();
3345 TrapCall->setDoesNotThrow();
3346 Builder.CreateUnreachable();
3347 Builder.ClearInsertionPoint();
3348}
3349
3350// Loc - where the diagnostic will point, where in the source code this
3351// alignment has failed.
3352// SecondaryLoc - if present (will be present if sufficiently different from
3353// Loc), the diagnostic will additionally point a "Note:" to this location.
3354// It should be the location where the __attribute__((assume_aligned))
3355// was written e.g.
// NOTE(review): first signature line stripped; this is
// CodeGenFunction::emitAlignmentAssumptionCheck — emit the -fsanitize=alignment
// runtime check that the just-emitted llvm.assume alignment assumption holds.
3357 llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
3358 SourceLocation SecondaryLoc, llvm::Value *Alignment,
3359 llvm::Value *OffsetValue, llvm::Value *TheCheck,
3360 llvm::Instruction *Assumption) {
3361 assert(isa_and_nonnull<llvm::CallInst>(Assumption) &&
3362 cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
3363 llvm::Intrinsic::getOrInsertDeclaration(
3364 Builder.GetInsertBlock()->getParent()->getParent(),
3365 llvm::Intrinsic::assume) &&
3366 "Assumption should be a call to llvm.assume().");
3367 assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
3368 "Assumption should be the last instruction of the basic block, "
3369 "since the basic block is still being generated.");
3370
3371 if (!SanOpts.has(SanitizerKind::Alignment))
3372 return;
3373
3374 // Don't check pointers to volatile data. The behavior here is implementation-
3375 // defined.
// NOTE(review): the condition line of this early return (orig. 3376, the
// volatile-qualification test on Ty) was stripped from this excerpt.
3377 return;
3378
3379 // We need to temporarily remove the assumption so we can insert the
3380 // sanitizer check before it, else the check will be dropped by optimizations.
3381 Assumption->removeFromParent();
3382
3383 {
3384 auto CheckOrdinal = SanitizerKind::SO_Alignment;
3385 auto CheckHandler = SanitizerHandler::AlignmentAssumption;
3386 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
3387
3388 if (!OffsetValue)
3389 OffsetValue = Builder.getInt1(false); // no offset.
3390
// NOTE(review): the final static-data line (orig. 3393, the type
// descriptor) was stripped from this excerpt.
3391 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
3392 EmitCheckSourceLocation(SecondaryLoc),
3394 llvm::Value *DynamicData[] = {Ptr, Alignment, OffsetValue};
3395 EmitCheck({std::make_pair(TheCheck, CheckOrdinal)}, CheckHandler,
3396 StaticData, DynamicData);
3397 }
3398
3399 // We are now in the (new, empty) "cont" basic block.
3400 // Reintroduce the assumption.
3401 Builder.Insert(Assumption);
3402 // FIXME: Assumption still has its original basic block as its Parent.
3403}
3404
// NOTE(review): signature line stripped; this is the body of
// CodeGenFunction::SourceLocToDebugLoc — translate a clang SourceLocation
// into an llvm::DebugLoc via the debug-info writer, or return an empty
// DebugLoc when no debug info is being emitted.
3406 if (CGDebugInfo *DI = getDebugInfo())
3407 return DI->SourceLocToDebugLoc(Location);
3408
3409 return llvm::DebugLoc();
3410}
3411
3412llvm::Value *
3413CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
3414 Stmt::Likelihood LH) {
3415 switch (LH) {
3416 case Stmt::LH_None:
3417 return Cond;
3418 case Stmt::LH_Likely:
3419 case Stmt::LH_Unlikely:
3420 // Don't generate llvm.expect on -O0 as the backend won't use it for
3421 // anything.
3422 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3423 return Cond;
3424 llvm::Type *CondTy = Cond->getType();
3425 assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
3426 llvm::Function *FnExpect =
3427 CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
3428 llvm::Value *ExpectedValueOfCond =
3429 llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
3430 return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
3431 Cond->getName() + ".expval");
3432 }
3433 llvm_unreachable("Unknown Likelihood");
3434}
3435
3436llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
3437 unsigned NumElementsDst,
3438 const llvm::Twine &Name) {
3439 auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
3440 unsigned NumElementsSrc = SrcTy->getNumElements();
3441 if (NumElementsSrc == NumElementsDst)
3442 return SrcVec;
3443
3444 std::vector<int> ShuffleMask(NumElementsDst, -1);
3445 for (unsigned MaskIdx = 0;
3446 MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
3447 ShuffleMask[MaskIdx] = MaskIdx;
3448
3449 return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
3450}
3451
// NOTE(review): the first signature line(s) were stripped; this is
// CodeGenFunction::EmitPointerAuthOperandBundle — attach a "ptrauth" operand
// bundle (key + discriminator) to a call when the schema signs pointers.
3453 const CGPointerAuthInfo &PointerAuth,
3455 if (!PointerAuth.isSigned())
3456 return;
3457
3458 auto *Key = Builder.getInt32(PointerAuth.getKey());
3459
// A missing discriminator is encoded as zero.
3460 llvm::Value *Discriminator = PointerAuth.getDiscriminator();
3461 if (!Discriminator)
3462 Discriminator = Builder.getSize(0);
3463
3464 llvm::Value *Args[] = {Key, Discriminator};
3465 Bundles.emplace_back("ptrauth", Args);
3466}
3467
// NOTE(review): first signature line stripped; this is the static helper
// EmitPointerAuthCommon — shared implementation of pointer signing and
// authentication: casts the pointer to intptr_t, calls the given ptrauth
// intrinsic with key and discriminator, and casts back.
3469 const CGPointerAuthInfo &PointerAuth,
3470 llvm::Value *Pointer,
3471 unsigned IntrinsicID) {
3472 if (!PointerAuth)
3473 return Pointer;
3474
3475 auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
3476
3477 llvm::Value *Discriminator = PointerAuth.getDiscriminator();
3478 if (!Discriminator) {
3479 Discriminator = CGF.Builder.getSize(0);
3480 }
3481
3482 // Convert the pointer to intptr_t before signing it.
3483 auto OrigType = Pointer->getType();
3484 Pointer = CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy);
3485
3486 // call i64 @llvm.ptrauth.sign.i64(i64 %pointer, i32 %key, i64 %discriminator)
3487 auto Intrinsic = CGF.CGM.getIntrinsic(IntrinsicID);
3488 Pointer = CGF.EmitRuntimeCall(Intrinsic, {Pointer, Key, Discriminator});
3489
3490 // Convert back to the original type.
3491 Pointer = CGF.Builder.CreateIntToPtr(Pointer, OrigType);
3492 return Pointer;
3493}
3494
3495llvm::Value *
// NOTE(review): the qualified-name signature line was stripped; this is
// CodeGenFunction::EmitPointerAuthSign — sign a pointer with the given
// schema via llvm.ptrauth.sign, or return it unchanged when the schema does
// not call for signing.
3497 llvm::Value *Pointer) {
3498 if (!PointerAuth.shouldSign())
3499 return Pointer;
3500 return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
3501 llvm::Intrinsic::ptrauth_sign);
3502}
3503
// Strip the pointer-authentication signature from Pointer via the
// llvm.ptrauth.strip intrinsic, round-tripping through intptr_t.
3504static llvm::Value *EmitStrip(CodeGenFunction &CGF,
3505 const CGPointerAuthInfo &PointerAuth,
3506 llvm::Value *Pointer) {
3507 auto StripIntrinsic = CGF.CGM.getIntrinsic(llvm::Intrinsic::ptrauth_strip);
3508
3509 auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
3510 // Convert the pointer to intptr_t before signing it.
3511 auto OrigType = Pointer->getType();
// NOTE(review): the line binding the intrinsic call result (orig. 3512)
// was stripped from this excerpt.
3513 StripIntrinsic, {CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy), Key});
3514 return CGF.Builder.CreateIntToPtr(Pointer, OrigType);
3515}
3516
3517llvm::Value *
// NOTE(review): the qualified-name signature line was stripped; this is
// CodeGenFunction::EmitPointerAuthAuth — either strip or authenticate the
// pointer according to the schema, or return it unchanged when neither is
// requested.
3519 llvm::Value *Pointer) {
3520 if (PointerAuth.shouldStrip()) {
3521 return EmitStrip(*this, PointerAuth, Pointer);
3522 }
3523 if (!PointerAuth.shouldAuth()) {
3524 return Pointer;
3525 }
3526
3527 return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
3528 llvm::Intrinsic::ptrauth_auth);
3529}
3530
// NOTE(review): first signature line stripped; this is
// CodeGenFunction::addInstToCurrentSourceAtom — forward the key instruction
// (and its backup) to the debug-info writer's current source atom, when
// debug info is enabled.
3532 llvm::Instruction *KeyInstruction, llvm::Value *Backup) {
3533 if (CGDebugInfo *DI = getDebugInfo())
3534 DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
3535}
3536
// NOTE(review): first signature line stripped; this is
// CodeGenFunction::addInstToSpecificSourceAtom — same as the "current atom"
// variant, but targets an explicitly numbered atom group.
3538 llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom) {
3539 if (CGDebugInfo *DI = getDebugInfo())
3540 DI->addInstToSpecificSourceAtom(KeyInstruction, Backup, Atom);
3541}
3542
// Open a fresh source atom group and add the key instruction to it, when
// debug info is enabled.
3543void CodeGenFunction::addInstToNewSourceAtom(llvm::Instruction *KeyInstruction,
3544 llvm::Value *Backup) {
3545 if (CGDebugInfo *DI = getDebugInfo()) {
// NOTE(review): one line (orig. 3546) was stripped from this excerpt here —
// presumably the scoped object that opens the new atom group; confirm
// against upstream.
3547 DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
3548 }
3549}
3550
// NOTE(review): first signature line stripped; judging by the body this
// copies pointer-field-protection (PFP) fields of type Ty from SrcPtr to
// DestPtr, field by field, skipping records whose PFP fields are trivially
// copyable (those are handled by the ordinary memcpy path) — confirm name
// against upstream.
3552 QualType Ty) {
3553 for (auto &Field : getContext().findPFPFields(Ty)) {
3554 if (getContext().arePFPFieldsTriviallyCopyable(Field.Field->getParent()))
3555 continue;
3556 auto DestFieldPtr = EmitAddressOfPFPField(DestPtr, Field);
3557 auto SrcFieldPtr = EmitAddressOfPFPField(SrcPtr, Field);
3558 Builder.CreateStore(Builder.CreateLoad(SrcFieldPtr), DestFieldPtr);
3559 }
3560}
static void findPFPFields(const ASTContext &Ctx, QualType Ty, CharUnits Offset, std::vector< PFPField > &Fields, bool IncludeVBases)
Defines the clang::ASTContext interface.
#define V(N, I)
This file provides some common utility functions for processing Lambda related AST Constructs.
Defines enum values for all the target-independent builtin functions.
static llvm::Value * EmitPointerAuthCommon(CodeGenFunction &CGF, const CGPointerAuthInfo &PointerAuth, llvm::Value *Pointer, unsigned IntrinsicID)
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, llvm::Function *Resolver, CGBuilderTy &Builder, llvm::Function *FuncToReturn, bool SupportsIFunc)
static llvm::Value * EmitStrip(CodeGenFunction &CGF, const CGPointerAuthInfo &PointerAuth, llvm::Value *Pointer)
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, Address dest, Address src, llvm::Value *sizeInChars)
emitNonZeroVLAInit - Emit the "zero" initialization of a variable-length array whose elements have a ...
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB)
static LValue makeNaturalAlignAddrLValue(llvm::Value *V, QualType T, bool ForPointeeType, bool MightBeSigned, CodeGenFunction &CGF, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
static void TryMarkNoThrow(llvm::Function *F)
Tries to mark the given function nounwind based on the non-existence of any throwing calls within it.
static llvm::Constant * getPrologueSignature(CodeGenModule &CGM, const FunctionDecl *FD)
Return the UBSan prologue signature for FD if one is available.
static bool endsWithReturn(const Decl *F)
Determine whether the function F ends with a return stmt.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts, const LangOptions &LangOpts)
shouldEmitLifetimeMarkers - Decide whether we need emit the life-time markers.
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
static StringRef getTriple(const Command &Job)
Defines the Objective-C statement AST node classes.
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:227
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const
Get a function type and produce the equivalent function type with the specified exception specificati...
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
bool hasAnyFunctionEffects() const
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
static bool hasSameType(QualType T1, QualType T2)
Determine whether the given types T1 and T2 are equivalent.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:924
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3777
QualType getElementType() const
Definition TypeBase.h:3789
Attr - This represents one attribute.
Definition Attr.h:46
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
static bool isLogicalOp(Opcode Opc)
Definition Expr.h:4174
BinaryOperatorKind Opcode
Definition Expr.h:4046
Represents a C++ constructor within a class.
Definition DeclCXX.h:2624
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2136
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
Definition DeclCXX.cpp:2728
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2275
QualType getThisType() const
Return the type of the this pointer.
Definition DeclCXX.cpp:2861
bool isStatic() const
Definition DeclCXX.cpp:2419
bool isLambda() const
Determine whether this class describes a lambda function object.
Definition DeclCXX.h:1018
void getCaptureFields(llvm::DenseMap< const ValueDecl *, FieldDecl * > &Captures, FieldDecl *&ThisCapture) const
For a closure type, retrieve the mapping from captured variables and this to the non-static data memb...
Definition DeclCXX.cpp:1790
bool isCapturelessLambda() const
Definition DeclCXX.h:1064
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1212
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
SourceLocation getBeginLoc() const
Definition Expr.h:3280
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * getBasePointer() const
Definition Address.h:198
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, SourceLocation TemporaryLocation)
Apply TemporaryLocation if it is valid.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock::iterator InsertPt) const override
This forwards to CodeGenFunction::InsertHelper.
llvm::ConstantInt * getSize(CharUnits N)
Definition CGBuilder.h:109
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
Definition CGCXXABI.h:158
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
Abstract information about a function or function prototype.
Definition CGCall.h:41
const FunctionProtoType * getCalleeFunctionProtoType() const
Definition CGCall.h:56
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition CGDebugInfo.h:59
CGFunctionInfo - Class to encapsulate the information about a function definition.
llvm::Value * getDiscriminator() const
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures)
An object to manage conditionally-evaluated expressions.
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitRISCVMultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
void setCurrentProfileCount(uint64_t Count)
Set the profiler's current count.
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
void EmitAArch64MultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null, If the type contains...
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitPPCAIXMultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
define internal ptr @foo.resolver() { entry: is_version_1 = __builtin_cpu_supports(version_1) br i1 %...
bool ShouldSkipSanitizerInstrumentation()
ShouldSkipSanitizerInstrumentation - Return true if the current function should not be instrumented w...
llvm::Value * EmitPPCBuiltinCpu(unsigned BuiltinID, llvm::Type *ReturnType, StringRef CPUStr)
Definition PPC.cpp:73
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
llvm::Value * EmitRISCVCpuSupports(const CallExpr *E)
Definition RISCV.cpp:970
llvm::Value * EmitRISCVCpuInit()
Definition RISCV.cpp:960
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we're intending to store to the side, but which will prob...
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
Definition CGClass.cpp:3177
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition CGStmt.cpp:697
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
void unprotectFromPeepholes(PeepholeProtection protection)
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
Definition CGExpr.cpp:7187
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:4035
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom)
See CGDebugInfo::addInstToSpecificSourceAtom.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
void EmitConstructorBody(FunctionArgList &Args)
EmitConstructorBody - Emits the body of the current constructor.
Definition CGClass.cpp:801
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T)
Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known to be unsigned.
@ TCK_ConstructorCall
Checking the 'this' pointer for a constructor call.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
bool hasSkipCounter(const Stmt *S) const
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitFunctionBody(const Stmt *Body)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3925
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:181
const TargetInfo & getTarget() const
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition CGStmt.cpp:572
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
void maybeCreateMCDCCondBitmap()
Allocate a temp value on the stack that MCDC can use to track condition results.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:256
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2520
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
RawAddress CreateIRTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateIRTempWithoutCast - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:188
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
llvm::Value * EmitPointerAuthSign(const CGPointerAuthInfo &Info, llvm::Value *Pointer)
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:237
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4183
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
void EmitDestructorBody(FunctionArgList &Args)
EmitDestructorBody - Emits the body of the current destructor.
Definition CGClass.cpp:1513
void EmitX86MultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5359
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount=0, Stmt::Likelihood LH=Stmt::LH_None, const Expr *CntrIdx=nullptr)
EmitBranchToCounterBlock - Emit a conditional branch to a new block that increments a profile counter...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
bool isMCDCBranchExpr(const Expr *E) const
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
static const Expr * stripCond(const Expr *C)
Ignore parentheses and logical-NOT to track conditions consistently.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Definition CGCall.cpp:3214
Address EmitAddressOfPFPField(Address RecordPtr, const PFPField &Field)
void SetFastMathFlags(FPOptions FPFeatures)
Set the codegen fast-math flags.
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
Address EmitVAListRef(const Expr *E)
void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD)
Definition CGClass.cpp:3234
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
Definition CGStmt.cpp:58
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
LValue EmitLValueForLambdaField(const FieldDecl *Field)
Definition CGExpr.cpp:5653
void emitPFPPostCopyUpdates(Address DestPtr, Address SrcPtr, QualType Ty)
Copy all PFP fields from SrcPtr to DestPtr while updating signatures, assuming that DestPtr was alrea...
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
Definition CGClass.cpp:1636
HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr
HLSL Branch attribute.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc, uint64_t RetKeyInstructionsSourceAtom)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition CGCall.cpp:4108
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1596
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:663
RawAddress NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
llvm::BasicBlock * GetIndirectGotoBlock()
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters.
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4603
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
const CGFunctionInfo * CurFnInfo
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1712
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
bool checkIfFunctionMustProgress()
Returns true if a function must make progress, which means the mustprogress attribute can be added.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
bool isMCDCDecisionExpr(const Expr *E) const
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
void MaybeEmitDeferredVarDeclInit(const VarDecl *var)
Definition CGDecl.cpp:2090
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to n...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:643
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
llvm::Value * EmitPointerAuthAuth(const CGPointerAuthInfo &Info, llvm::Value *Pointer)
This class organizes the cross-function state that is used while generating LLVM code.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void GenKernelArgMetadata(llvm::Function *FN, const FunctionDecl *FD=nullptr, CodeGenFunction *CGF=nullptr)
OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument information in the program executab...
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
Per-function PGO state.
Definition CodeGenPGO.h:29
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition CGCall.cpp:394
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:373
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition TargetInfo.h:244
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1746
ConditionalOperator - The ?
Definition Expr.h:4394
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
ValueDecl * getDecl()
Definition Expr.h:1341
Decl - This represents one declaration (or definition), e.g.
Definition DeclBase.h:86
T * getAttr() const
Definition DeclBase.h:581
ASTContext & getASTContext() const LLVM_READONLY
Definition DeclBase.cpp:547
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition DeclBase.h:567
SourceLocation getLocation() const
Definition DeclBase.h:447
bool hasAttr() const
Definition DeclBase.h:585
This represents one expression.
Definition Expr.h:112
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Returns the set of floating point options that apply to this expression.
Definition Expr.cpp:3989
std::optional< llvm::APSInt > getIntegerConstantExpr(const ASTContext &Ctx) const
isIntegerConstantExpr - Return the value if this expression is a valid integer constant expression.
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
ExtVectorType - Extended vector type.
Definition TypeBase.h:4322
LangOptions::FPExceptionModeKind getExceptionMode() const
bool allowFPContractAcrossStatement() const
RoundingMode getRoundingMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3175
Represents a function declaration or definition.
Definition Decl.h:2015
bool isMultiVersion() const
True if this function is considered a multiversioned function.
Definition Decl.h:2704
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition Decl.cpp:3245
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3728
bool UsesFPIntrin() const
Determine whether the function was declared in source context that requires constrained FP intrinsics...
Definition Decl.h:2924
bool usesSEHTry() const
Indicates the function uses __try.
Definition Decl.h:2533
QualType getReturnType() const
Definition Decl.h:2860
ArrayRef< ParmVarDecl * > parameters() const
Definition Decl.h:2789
FunctionDecl * getTemplateInstantiationPattern(bool ForDefinition=true) const
Retrieve the function declaration from which this function could be instantiated, if it is an instant...
Definition Decl.cpp:4230
FunctionEffectsRef getFunctionEffects() const
Definition Decl.h:3149
bool isMSVCRTEntryPoint() const
Determines whether this function is a MSVCRT user defined entry point.
Definition Decl.cpp:3341
bool isInlineBuiltinDeclaration() const
Determine if this function provides an inline implementation of a builtin.
Definition Decl.cpp:3492
bool hasImplicitReturnZero() const
Whether falling off this function implicitly returns null/zero.
Definition Decl.h:2443
bool isMain() const
Determines whether this function is "main", which is the entry point into an executable program.
Definition Decl.cpp:3334
bool isDefaulted() const
Whether this function is defaulted.
Definition Decl.h:2400
OverloadedOperatorKind getOverloadedOperator() const
getOverloadedOperator - Which C++ overloaded operator this function represents, if any.
Definition Decl.cpp:4096
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5362
QualType desugar() const
Definition TypeBase.h:5943
FunctionTypeExtraAttributeInfo getExtraAttributeInfo() const
Return the extra attribute information.
Definition TypeBase.h:5851
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4558
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
CXXCtorType getCtorType() const
Definition GlobalDecl.h:108
KernelReferenceKind getKernelReferenceKind() const
Definition GlobalDecl.h:135
const Decl * getDecl() const
Definition GlobalDecl.h:106
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5568
Represents the declaration of a label.
Definition Decl.h:524
FPExceptionModeKind
Possible floating point exception behavior.
@ FPE_Strict
Strictly preserve the floating-point exception semantics.
@ FPE_MayTrap
Transformations do not cause new exceptions but may hide some.
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
SanitizerSet Sanitize
Set of enabled sanitizers.
RoundingMode getDefaultRoundingMode() const
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition Decl.h:340
Represents a parameter to a function.
Definition Decl.h:1805
ParsedAttr - Represents a syntactic attribute.
Definition ParsedAttr.h:119
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3383
@ Forbid
Profiling is forbidden using the noprofile attribute.
Definition ProfileList.h:37
@ Skip
Profiling is skipped using the skipprofile attribute.
Definition ProfileList.h:35
@ Allow
Profiling is allowed.
Definition ProfileList.h:33
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8520
field_range fields() const
Definition Decl.h:4545
Encodes a location in the source.
A trivial tuple used to represent a source range.
Stmt - This represents one statement.
Definition Stmt.h:86
child_range children()
Definition Stmt.cpp:304
StmtClass getStmtClass() const
Definition Stmt.h:1499
Likelihood
The likelihood of a branch being taken.
Definition Stmt.h:1442
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition Stmt.h:1443
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition Stmt.h:1444
@ LH_Likely
Branch has the [[likely]] attribute.
Definition Stmt.h:1446
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
virtual std::optional< std::pair< unsigned, unsigned > > getVScaleRange(const LangOptions &LangOpts, ArmStreamingKind Mode, llvm::StringMap< bool > *FeatureMap=nullptr) const
Returns target-specific min and max values VScale_Range.
bool supportsIFunc() const
Identify whether this target supports IFuncs.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
virtual ParsedTargetAttr parseTargetAttr(StringRef Str) const
bool isVoidType() const
Definition TypeBase.h:9039
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2266
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:789
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2855
TypeClass getTypeClass() const
Definition TypeBase.h:2438
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9266
bool isObjCRetainableType() const
Definition Type.cpp:5404
bool isFunctionNoProtoType() const
Definition TypeBase.h:2653
bool isCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8719
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4021
Expr * getSizeExpr() const
Definition TypeBase.h:4035
QualType getElementType() const
Definition TypeBase.h:4244
Defines the clang::TargetInfo interface.
#define UINT_MAX
Definition limits.h:64
bool evaluateRequiredTargetFeatures(llvm::StringRef RequiredFatures, const llvm::StringMap< bool > &TargetFetureMap)
Returns true if the required target features of a builtin function are enabled.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
CGBuilderInserter CGBuilderInserterTy
Definition CGBuilder.h:47
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask FunctionExit
Definition XRayInstr.h:40
constexpr XRayInstrMask FunctionEntry
Definition XRayInstr.h:39
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
Expr * IgnoreBuiltinExpectSingleStep(Expr *E)
Definition IgnoreExpr.h:135
@ NonNull
Values of this type can never be null.
Definition Specifiers.h:351
Expr * IgnoreExprNodes(Expr *E, FnTys &&... Fns)
Given an expression E and functions Fn_1,...,Fn_n : Expr * -> Expr *, Recursively apply each of the f...
Definition IgnoreExpr.h:24
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Expr * Cond
};
bool isLambdaCallOperator(const CXXMethodDecl *MD)
Definition ASTLambda.h:28
@ Result
The result type of a method or function.
Definition TypeBase.h:905
Expr * IgnoreImplicitCastsSingleStep(Expr *E)
Definition IgnoreExpr.h:38
Expr * IgnoreUOpLNotSingleStep(Expr *E)
Definition IgnoreExpr.h:127
Expr * IgnoreParensSingleStep(Expr *E)
Definition IgnoreExpr.h:157
llvm::fp::ExceptionBehavior ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind)
U cast(CodeGen::Address addr)
Definition Address.h:327
bool IsArmStreamingFunction(const FunctionDecl *FD, bool IncludeLocallyStreaming)
Returns whether the given FunctionDecl has an __arm[_locally]_streaming attribute.
Definition Decl.cpp:6065
@ Other
Other implicit parameter.
Definition Decl.h:1761
@ EST_None
no exception specification
@ Implicit
An implicit conversion.
Definition Sema.h:440
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
This structure provides a set of types that are commonly used during IR emission.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
A FunctionEffect plus a potential boolean expression determining whether the effect is declared (e....
Definition TypeBase.h:5099
Contains information gathered from parsing the contents of TargetAttr.
Definition TargetInfo.h:60
std::vector< std::string > Features
Definition TargetInfo.h:61
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174