//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CRC.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

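/// The constructor ties fresh per-function state to the owning CodeGenModule
/// and seeds the IR builder's fast-math and constrained-FP defaults from the
/// current language options.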
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  EHStack.setCGF(this);

  SetFastMathFlags(CurFPFeatures);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside, thus, modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {

  switch (Kind) {
  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  default:
    llvm_unreachable("Unsupported FP Exception Behavior");
  }
}

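/// Propagate the semantic FPOptions onto the IR builder's FastMathFlags so
/// that subsequently emitted floating-point instructions pick them up.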
void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}

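/// CGFPOptionsRAII - Temporarily install a set of floating-point options for
/// the duration of a statement or expression, restoring the previous builder
/// state on destruction.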
CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  const Expr *E)
    : CGF(CGF) {
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}

void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
          FPFeatures.getExceptionMode()));
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue =
        CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
  mergeFnAttrValue(
      "unsafe-fp-math",
      FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() &&
          FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() &&
          FPFeatures.allowFPContractAcrossStatement());
}

CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}

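/// Given a value of type T*, construct an l-value for it using the natural
/// alignment of a complete object of type T.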
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  Address Addr(V, ConvertTypeForMem(T), Alignment);
  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                                /* forPointeeType= */ true);
  Address Addr(V, ConvertTypeForMem(T), Align);
  return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

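/// Classify how a value of the given type is evaluated and represented in
/// IR: as a scalar, a complex pair, or an aggregate in memory.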
TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

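/// Emit (or fold away) the unified return block, returning the debug
/// location to use for the final 'ret' instruction when one is available.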
llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be used
      // later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

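/// Append BB to the current function if anything branches to it; otherwise
/// just delete it.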
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty()) {
    CGF.CurFn->insert(CGF.CurFn->end(), BB);
    return;
  }
  delete BB;
}

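/// FinishFunction - Complete IR generation for the current function body:
/// pop any remaining cleanups, emit the return block and function epilog,
/// and finalize function-wide attributes.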
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;

  std::optional<ApplyDebugLocation> OAL;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        OAL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
    }

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // PostAllocaInsertPt was created lazily only when required; remove it now,
  // since it too exists only for our own convenience.
  if (PostAllocaInsertPt) {
    llvm::Instruction *PostPtr = PostAllocaInsertPt;
    PostAllocaInsertPt = nullptr;
    PostPtr->eraseFromParent();
  }

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions when compiled with no optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct coroutine
  // frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinValue());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinValue());

  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();

  // Add the min-legal-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by this
  //    function.
  if (getContext().getTargetInfo().getTriple().isX86())
    CurFn->addFnAttr("min-legal-vector-width",
                     llvm::utostr(LargestVectorWidth));

  // Add vscale_range attribute if appropriate.
  std::optional<std::pair<unsigned, unsigned>> VScaleRange =
      getContext().getTargetInfo().getVScaleRange(getLangOpts());
  if (VScaleRange) {
    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
        getLLVMContext(), VScaleRange->first, VScaleRange->second));
  }

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
  if (!CurFuncDecl)
    return false;
  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::ConstantInt *
CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
  // Remove any (C++17) exception specifications, to allow calling e.g. a
  // noexcept function through a non-noexcept pointer.
  if (!Ty->isFunctionNoProtoType())
    Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
  std::string Mangled;
  llvm::raw_string_ostream Out(Mangled);
  CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out, false);
  return llvm::ConstantInt::get(
      CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
}

void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
                                         llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenKernelArgMetadata(Fn, FD, this);

  if (!getLangOpts().OpenCL)
    return;

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

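/// Suppress TSan checking for this function at run time by attaching the
/// corresponding string attribute and dropping the sanitize_thread function
/// attribute.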
void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

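/// Return true if D looks like an STL allocator's allocate() member, i.e. a
/// method named 'allocate' taking a size_t and optionally a const void*.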
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
  return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
         getTarget().getCXXABI().isMicrosoft() &&
         llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
           return isInAllocaArgument(CGM.getCXXABI(), P->getType());
         });
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

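/// StartFunction - Emit the standard prologue for the given function: set up
/// per-function state, apply sanitizer and instrumentation attributes, create
/// the entry block, and emit the argument and return-slot setup.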
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD && FD->usesSEHTry())
    CurSEHParent = GD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function is ignored for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc))                    \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (false);

  if (D) {
    const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
    SanitizerMask no_sanitize_mask;
    bool NoSanitizeCoverage = false;

    for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
      no_sanitize_mask |= Attr->getMask();
      // SanitizeCoverage is not handled by SanOpts.
      if (Attr->hasCoverage())
        NoSanitizeCoverage = true;
    }

    // Apply the no_sanitize* attributes to SanOpts.
    SanOpts.Mask &= ~no_sanitize_mask;
    if (no_sanitize_mask & SanitizerKind::Address)
      SanOpts.set(SanitizerKind::KernelAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelAddress)
      SanOpts.set(SanitizerKind::Address, false);
    if (no_sanitize_mask & SanitizerKind::HWAddress)
      SanOpts.set(SanitizerKind::KernelHWAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
      SanOpts.set(SanitizerKind::HWAddress, false);

    if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
      Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);

    if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
      Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);

    // Some passes need the non-negated no_sanitize attribute. Pass them on.
    if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
      if (no_sanitize_mask & SanitizerKind::Thread)
        Fn->addFnAttr("no_sanitize_thread");
    }
  }

  if (ShouldSkipSanitizerInstrumentation()) {
    CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
  } else {
    // Apply sanitizer attributes to the function.
    if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
    if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                         SanitizerKind::KernelHWAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
    if (SanOpts.has(SanitizerKind::MemtagStack))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
    if (SanOpts.has(SanitizerKind::Thread))
      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
    if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  }
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on the
  // namespace because not all allocators are in std::.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutines passes
  // are not aware of how to move the extra UBSan instructions across the split
  // coroutine boundaries.
  if (D && SanOpts.has(SanitizerKind::Null))
    if (FD && FD->getBody() &&
        FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
      SanOpts.Mask &= ~SanitizerKind::Null;

  // Apply xray attributes to the function (as a string, for now)
  bool AlwaysXRayAttr = false;
  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry) ||
        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit)) {
      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
        Fn->addFnAttr("function-instrument", "xray-always");
        AlwaysXRayAttr = true;
      }
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
        if (ShouldXRayInstrumentFunction())
          Fn->addFnAttr("xray-log-args",
                        llvm::utostr(LogArgs->getArgumentCount()));
    }
  } else {
    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
      Fn->addFnAttr(
          "xray-instruction-threshold",
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
  }

  if (ShouldXRayInstrumentFunction()) {
    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
      Fn->addFnAttr("xray-ignore-loops");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit))
      Fn->addFnAttr("xray-skip-exit");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry))
      Fn->addFnAttr("xray-skip-entry");

    auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
    if (FuncGroups > 1) {
      auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
                                              CurFn->getName().bytes_end());
      auto Group = crc32(FuncName) % FuncGroups;
      if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
          !AlwaysXRayAttr)
        Fn->addFnAttr("function-instrument", "xray-never");
    }
  }

  if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) {
    switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
    case ProfileList::Skip:
      Fn->addFnAttr(llvm::Attribute::SkipProfile);
      break;
    case ProfileList::Forbid:
      Fn->addFnAttr(llvm::Attribute::NoProfile);
      break;
    case ProfileList::Allow:
      break;
    }
  }

  unsigned Count, Offset;
  if (const auto *Attr =
          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
    Count = Attr->getCount();
    Offset = Attr->getOffset();
  } else {
    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
  }
  if (Count && Offset <= Count) {
    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
    if (Offset)
      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
  }
  // Instruct that functions for COFF/CodeView targets should start with a
  // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
  // backends as they don't need it -- instructions on these architectures are
  // always atomically patchable at runtime.
  if (CGM.getCodeGenOpts().HotPatch &&
      getContext().getTargetInfo().getTriple().isX86() &&
      getContext().getTargetInfo().getTriple().getEnvironment() !=
          llvm::Triple::CODE16)
    Fn->addFnAttr("patchable-function", "prologue-short-redirect");

  // Add no-jump-tables value.
  if (CGM.getCodeGenOpts().NoUseJumpTables)
    Fn->addFnAttr("no-jump-tables", "true");

  // Add no-inline-line-tables value.
  if (CGM.getCodeGenOpts().NoInlineLineTables)
    Fn->addFnAttr("no-inline-line-tables");

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
    Fn->addFnAttr("use-sample-profile");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (D && D->hasAttr<NoProfileFunctionAttr>())
    Fn->addFnAttr(llvm::Attribute::NoProfile);

  if (D) {
    // Function attributes take precedence over command line flags.
    if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
      switch (A->getThunkType()) {
      case FunctionReturnThunksAttr::Kind::Keep:
        break;
      case FunctionReturnThunksAttr::Kind::Extern:
        Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
        break;
      }
    } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
      Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
  }

  if (FD && (getLangOpts().OpenCL ||
             (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) {
    // Add metadata for a kernel function.
    EmitKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (FD && SanOpts.has(SanitizerKind::Function)) {
    if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
      llvm::LLVMContext &Ctx = Fn->getContext();
      llvm::MDBuilder MDB(Ctx);
      Fn->setMetadata(
          llvm::LLVMContext::MD_func_sanitize,
          MDB.createRTTIPointerPrologue(
              PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  //
  // OpenCL C 2.0 v2.2-11 s6.9.i:
  //     Recursion is not supported.
  //
  // SYCL v1.2.1 s3.10:
  //     kernels cannot include RTTI information, exception classes,
  //     recursive code, virtual functions or make use of C++ libraries that
  //     are not compiled for the device.
  if (FD && ((getLangOpts().CPlusPlus && FD->isMain()) ||
             getLangOpts().OpenCL || getLangOpts().SYCLIsDevice ||
             (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
    Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
  llvm::fp::ExceptionBehavior FPExceptionBehavior =
      ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
  Builder.setDefaultConstrainedRounding(RM);
  Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
  if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
      (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
               RM != llvm::RoundingMode::NearestTiesToEven))) {
    Builder.setIsFPConstrained(true);
    Fn->addFnAttr(llvm::Attribute::StrictFP);
  }

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
             CGM.getCodeGenOpts().StackAlignment))
    Fn->addFnAttr("stackrealign");

  // "main" doesn't need to zero out call-used registers.
  if (FD && FD->isMain())
    Fn->removeFnAttr("zero-call-used-regs");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
                        ReturnLocation);
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    DI->emitFunctionStart(GD, Loc, StartLoc,
                          DI->getFunctionType(FD, RetTy, Args), CurFn,
                          CurFuncIsThunk);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as function
  // inlining, we just add an attribute to insert an mcount call in the backend.
  // The attribute "counting-function" is set to the mcount function name, which
  // is architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if the function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
      if (CGM.getCodeGenOpts().MNopMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mnop-mcount" << "-mfentry";
        Fn->addFnAttr("mnop-mcount");
      }

      if (CGM.getCodeGenOpts().RecordMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mrecord-mcount" << "-mfentry";
        Fn->addFnAttr("mrecord-mcount");
      }
    }
  }

  if (CGM.getCodeGenOpts().PackedStack) {
    if (getContext().getTargetInfo().getTriple().getArch() !=
        llvm::Triple::systemz)
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
          << "-mpacked-stack";
    Fn->addFnAttr("packed-stack");
  }

  if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
      !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
    Fn->addFnAttr("warn-stack-size",
                  std::to_string(CGM.getCodeGenOpts().WarnStackSize));

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue =
        Address(&*AI, ConvertType(RetTy),
                CurFnInfo->getReturnInfo().getIndirectAlign(), KnownNonNull);
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer = CreateDefaultAlignTempAlloca(
          ReturnValue.getPointer()->getType(), "result.ptr");
      Builder.CreateStore(ReturnValue.getPointer(), ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(
        CurFnInfo->getArgStruct(), &*EI, Idx);
    llvm::Type *Ty =
        cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
    ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, ConvertType(RetTy),
                          CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, D);

  // Handle emitting HLSL entry functions.
  if (D && D->hasAttr<HLSLShaderAttr>())
    CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
      MD && !MD->isStatic()) {
    bool IsInLambda =
        MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
    if (MD->isImplicitObjectMemberFunction())
      CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    if (IsInLambda) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing object
        // or contains the address of the enclosing object).
        LValue ThisFieldLValue =
            EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its address.
          CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was captured
          // by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else if (MD->isImplicitObjectMemberFunction()) {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no captures, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) && MD->getParent()->isCapturelessLambda())
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(
          isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
          Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size, but only if the function is not naked. Naked functions
  // have no prolog to run this evaluation.
  if (!FD || !FD->hasAttr<NakedAttr>()) {
    for (const VarDecl *VD : Args) {
      // Dig out the type as written from ParmVarDecls; it's unclear whether
      // the standard (C99 6.9.1p10) requires this, but we're following the
      // precedent set by gcc.
      QualType Ty;
      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
        Ty = PVD->getOriginalType();
      else
        Ty = VD->getType();

      if (Ty->isVariablyModifiedType())
        EmitVariablyModifiedType(Ty);
    }
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

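/// Emit the body of the current function, emitting a top-level CompoundStmt
/// without introducing an extra lexical scope (the function scope is managed
/// by StartFunction/FinishFunction).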
void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  maybeCreateMCDCCondBitmap();
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  // Do not skip over the instrumentation when single byte coverage mode is
  // enabled.
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr() &&
      !llvm::EnableSingleByteCoverage) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}

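/// Build the argument list for the given function declaration, adding the
/// implicit 'this' parameter, pass_object_size arguments, and any implicit
/// structor parameters required by the C++ ABI.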
QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isImplicitObjectMemberFunction()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  assert(Fn && "generating code for null Function");
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  if (FD->isInlineBuiltinDeclaration()) {
    // When generating code for a builtin with an inline declaration, use a
    // mangled name to hold the actual body, while keeping an external
    // definition in case the function pointer is referenced somewhere.
    std::string FDInlineName = (Fn->getName() + ".inline").str();
    llvm::Module *M = Fn->getParent();
    llvm::Function *Clone = M->getFunction(FDInlineName);
    if (!Clone) {
      Clone = llvm::Function::Create(Fn->getFunctionType(),
                                     llvm::GlobalValue::InternalLinkage,
                                     Fn->getAddressSpace(), FDInlineName, M);
      Clone->addFnAttr(llvm::Attribute::AlwaysInline);
    }
    Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
    Fn = Clone;
  } else {
    // Detect the unusual situation where an inline version is shadowed by a
    // non-inline version. In that case we should pick the external one
    // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
    // to detect that situation before we reach codegen, so do some late
    // replacement.
    for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
         PD = PD->getPreviousDecl()) {
      if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
        std::string FDInlineName = (Fn->getName() + ".inline").str();
        llvm::Module *M = Fn->getParent();
        if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
          Clone->replaceAllUsesWith(Fn);
          Clone->eraseFromParent();
        }
        break;
      }
    }
  }

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>()) {
    // Clear non-distinct debug info that was possibly attached to the function
    // due to an earlier declaration without the nodebug attribute.
    Fn->setSubprogram(nullptr);
    // Disable debug info indefinitely for this function.
    DebugInfo = nullptr;
  }

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  if (Body) {
    // Coroutines always emit lifetime markers.
    if (isa<CoroutineBodyStmt>(Body))
      ShouldEmitLifetimeMarkers = true;

    // Initialize helper which will detect jumps which can cause invalid
    // lifetime markers.
    if (ShouldEmitLifetimeMarkers)
      Bypasses.Init(Body);
  }

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Save parameters for coroutine function.
  if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
    llvm::append_range(FnArgs, FD->parameters());

  // Ensure that the function adheres to the forward progress guarantee, which
  // is required by certain optimizations.
  if (checkIfFunctionMustProgress())
    CurFn->addFnAttr(llvm::Attribute::MustProgress);

  // Generate the body of the function.
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (isa<CXXMethodDecl>(FD) &&
             isLambdaCallOperator(cast<CXXMethodDecl>(FD)) &&
             !FnInfo.isDelegateCall() &&
             cast<CXXMethodDecl>(FD)->getParent()->getLambdaStaticInvoker() &&
             hasInAllocaArg(cast<CXXMethodDecl>(FD))) {
    // If emitting a lambda with static invoker on X86 Windows, change
    // the call operator body.
    // Make sure that this is a call operator with an inalloca arg and check
    // for delegate call to make sure this is the original call op and not the
    // new forwarding function for the static invoker.
    EmitLambdaInAllocaCallOpBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Body) {
    EmitFunctionBody(Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior in
  //   a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
        !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                SanitizerHandler::MissingReturn,
                EmitCheckSourceLocation(FD->getLocation()), std::nullopt);
    } else if (ShouldEmitUnreachable) {
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally but contains no label, we can
/// safely remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (const Stmt *SubStmt : S->children())
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, no break.
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}

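/// Determine whether the given statement might add a declaration directly to
/// the current scope (rather than to a nested scope of its own).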
1585 if (!S) return false;
1586
1587 // Some statement kinds add a scope and thus never add a decl to the current
1588 // scope. Note, this list is longer than the list of statements that might
1589 // have an unscoped decl nested within them, but this way is conservatively
1590 // correct even if more statement kinds are added.
1591 if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1592 isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1593 isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1594 isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1595 return false;
1596
1597 if (isa<DeclStmt>(S))
1598 return true;
1599
1600 for (const Stmt *SubStmt : S->children())
1601 if (mightAddDeclToScope(SubStmt))
1602 return true;
1603
1604 return false;
1605}
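// For instance (illustrative): "{ int x; }" cannot add a decl to the current
// scope (the CompoundStmt opens its own), whereas a bare "int x;" DeclStmt
// can, so only the latter returns true here.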
1606
1607/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1608/// to a constant, or if it does but contains a label, return false. If it
1609/// constant folds return true and set the boolean result in Result.
1610 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1611 bool &ResultBool,
1612 bool AllowLabels) {
1613 // If MC/DC is enabled, disable folding so that we can instrument all
1614 // conditions to yield complete test vectors. We still keep track of
1615 // folded conditions during region mapping and visualization.
1616 if (!AllowLabels && CGM.getCodeGenOpts().hasProfileClangInstr() &&
1617 CGM.getCodeGenOpts().MCDCCoverage)
1618 return false;
1619
1620 llvm::APSInt ResultInt;
1621 if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1622 return false;
1623
1624 ResultBool = ResultInt.getBoolValue();
1625 return true;
1626}
1627
1628/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1629/// to a constant, or if it does but contains a label, return false. If it
1630/// constant folds return true and set the folded value.
1631 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1632 llvm::APSInt &ResultInt,
1633 bool AllowLabels) {
1634 // FIXME: Rename and handle conversion of other evaluatable things
1635 // to bool.
1636 Expr::EvalResult Result;
1637 if (!Cond->EvaluateAsInt(Result, getContext()))
1638 return false; // Not foldable, not integer or not fully evaluatable.
1639
1640 llvm::APSInt Int = Result.Val.getInt();
1641 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1642 return false; // Contains a label.
1643
1644 ResultInt = Int;
1645 return true;
1646}
1647
1648/// Strip parentheses and simplistic logical-NOT operators.
1649const Expr *CodeGenFunction::stripCond(const Expr *C) {
1650 while (const UnaryOperator *Op = dyn_cast<UnaryOperator>(C->IgnoreParens())) {
1651 if (Op->getOpcode() != UO_LNot)
1652 break;
1653 C = Op->getSubExpr();
1654 }
1655 return C->IgnoreParens();
1656}
1657
1658/// Determine whether the given condition is an instrumentable condition
1659/// (i.e. no "&&" or "||").
1660 bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
1661 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(stripCond(C));
1662 return (!BOp || !BOp->isLogicalOp());
1663}
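// For instance (illustrative): "a && b" is not itself an instrumentable
// condition (the logical operator is split first), while "!(a)" is, because
// stripCond peels the negation down to the leaf condition "a".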
1664
1665/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1666/// increments a profile counter based on the semantics of the given logical
1667/// operator opcode. This is used to instrument branch condition coverage for
1668/// logical operators.
1669 void CodeGenFunction::EmitBranchToCounterBlock(
1670 const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1671 llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1672 Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1673 // If not instrumenting, just emit a branch.
1674 bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1675 if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1676 return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1677
1678 llvm::BasicBlock *ThenBlock = nullptr;
1679 llvm::BasicBlock *ElseBlock = nullptr;
1680 llvm::BasicBlock *NextBlock = nullptr;
1681
1682 // Create the block we'll use to increment the appropriate counter.
1683 llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1684
1685 // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1686 // means we need to evaluate the condition and increment the counter on TRUE:
1687 //
1688 // if (Cond)
1689 // goto CounterIncrBlock;
1690 // else
1691 // goto FalseBlock;
1692 //
1693 // CounterIncrBlock:
1694 // Counter++;
1695 // goto TrueBlock;
1696
1697 if (LOp == BO_LAnd) {
1698 ThenBlock = CounterIncrBlock;
1699 ElseBlock = FalseBlock;
1700 NextBlock = TrueBlock;
1701 }
1702
1703 // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1704 // we need to evaluate the condition and increment the counter on FALSE:
1705 //
1706 // if (Cond)
1707 // goto TrueBlock;
1708 // else
1709 // goto CounterIncrBlock;
1710 //
1711 // CounterIncrBlock:
1712 // Counter++;
1713 // goto FalseBlock;
1714
1715 else if (LOp == BO_LOr) {
1716 ThenBlock = TrueBlock;
1717 ElseBlock = CounterIncrBlock;
1718 NextBlock = FalseBlock;
1719 } else {
1720 llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1721 }
1722
1723 // Emit Branch based on condition.
1724 EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1725
1726 // Emit the block containing the counter increment(s).
1727 EmitBlock(CounterIncrBlock);
1728
1729 // Increment corresponding counter; if index not provided, use Cond as index.
1730 incrementProfileCounter(CntrIdx ? CntrIdx : Cond);
1731
1732 // Go to the next block.
1733 EmitBranch(NextBlock);
1734}
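// Usage sketch (hypothetical): for "if (a && b)" under
// -fprofile-instr-generate, the RHS "b" is reached through the "lop.rhscnt"
// block created above, so its counter records exactly how often the RHS was
// evaluated and came out true.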
1735
1736/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1737/// statement) to the specified blocks. Based on the condition, this might try
1738/// to simplify the codegen of the conditional based on the branch.
1739/// \param LH The value of the likelihood attribute on the True branch.
1740/// \param ConditionalOp Used by MC/DC code coverage to track the result of the
1741/// ConditionalOperator (ternary) through a recursive call for the operator's
1742/// LHS and RHS nodes.
1743 void CodeGenFunction::EmitBranchOnBoolExpr(
1744 const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock,
1745 uint64_t TrueCount, Stmt::Likelihood LH, const Expr *ConditionalOp) {
1746 Cond = Cond->IgnoreParens();
1747
1748 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1749 // Handle X && Y in a condition.
1750 if (CondBOp->getOpcode() == BO_LAnd) {
1751 MCDCLogOpStack.push_back(CondBOp);
1752
1753 // If we have "1 && X", simplify the code. "0 && X" would have constant
1754 // folded if the case was simple enough.
1755 bool ConstantBool = false;
1756 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1757 ConstantBool) {
1758 // br(1 && X) -> br(X).
1759 incrementProfileCounter(CondBOp);
1760 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1761 FalseBlock, TrueCount, LH);
1762 MCDCLogOpStack.pop_back();
1763 return;
1764 }
1765
1766 // If we have "X && 1", simplify the code to use an uncond branch.
1767 // "X && 0" would have been constant folded to 0.
1768 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1769 ConstantBool) {
1770 // br(X && 1) -> br(X).
1771 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1772 FalseBlock, TrueCount, LH, CondBOp);
1773 MCDCLogOpStack.pop_back();
1774 return;
1775 }
1776
1777 // Emit the LHS as a conditional. If the LHS conditional is false, we
1778 // want to jump to the FalseBlock.
1779 llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1780 // The counter tells us how often we evaluate RHS, and all of TrueCount
1781 // can be propagated to that branch.
1782 uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1783
1784 ConditionalEvaluation eval(*this);
1785 {
1786 ApplyDebugLocation DL(*this, Cond);
1787 // Propagate the likelihood attribute like __builtin_expect
1788 // __builtin_expect(X && Y, 1) -> X and Y are likely
1789 // __builtin_expect(X && Y, 0) -> only Y is unlikely
1790 EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1791 LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1792 EmitBlock(LHSTrue);
1793 }
1794
1795 incrementProfileCounter(CondBOp);
1796 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1797
1798 // Any temporaries created here are conditional.
1799 eval.begin(*this);
1800 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1801 FalseBlock, TrueCount, LH);
1802 eval.end(*this);
1803 MCDCLogOpStack.pop_back();
1804 return;
1805 }
1806
1807 if (CondBOp->getOpcode() == BO_LOr) {
1808 MCDCLogOpStack.push_back(CondBOp);
1809
1810 // If we have "0 || X", simplify the code. "1 || X" would have constant
1811 // folded if the case was simple enough.
1812 bool ConstantBool = false;
1813 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1814 !ConstantBool) {
1815 // br(0 || X) -> br(X).
1816 incrementProfileCounter(CondBOp);
1817 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1818 FalseBlock, TrueCount, LH);
1819 MCDCLogOpStack.pop_back();
1820 return;
1821 }
1822
1823 // If we have "X || 0", simplify the code to use an uncond branch.
1824 // "X || 1" would have been constant folded to 1.
1825 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1826 !ConstantBool) {
1827 // br(X || 0) -> br(X).
1828 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1829 FalseBlock, TrueCount, LH, CondBOp);
1830 MCDCLogOpStack.pop_back();
1831 return;
1832 }
1833 // Emit the LHS as a conditional. If the LHS conditional is true, we
1834 // want to jump to the TrueBlock.
1835 llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1836 // We have the count for entry to the RHS and for the whole expression
1837 // being true, so we can divvy up the true count between the short circuit
1838 // and the RHS.
1839 uint64_t LHSCount =
1840 getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1841 uint64_t RHSCount = TrueCount - LHSCount;
1842
1843 ConditionalEvaluation eval(*this);
1844 {
1845 // Propagate the likelihood attribute like __builtin_expect
1846 // __builtin_expect(X || Y, 1) -> only Y is likely
1847 // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1848 ApplyDebugLocation DL(*this, Cond);
1849 EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1850 LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1851 EmitBlock(LHSFalse);
1852 }
1853
1854 incrementProfileCounter(CondBOp);
1855 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1856
1857 // Any temporaries created here are conditional.
1858 eval.begin(*this);
1859 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
1860 RHSCount, LH);
1861
1862 eval.end(*this);
1863 MCDCLogOpStack.pop_back();
1864 return;
1865 }
1866 }
1867
1868 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1869 // br(!x, t, f) -> br(x, f, t)
1870 // Avoid doing this optimization when instrumenting a condition for MC/DC.
1871 // LNot is taken as part of the condition for simplicity, and changing its
1872 // sense negatively impacts test vector tracking.
1873 bool MCDCCondition = CGM.getCodeGenOpts().hasProfileClangInstr() &&
1874 CGM.getCodeGenOpts().MCDCCoverage &&
1875 isInstrumentedCondition(Cond);
1876 if (CondUOp->getOpcode() == UO_LNot && !MCDCCondition) {
1877 // Negate the count.
1878 uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1879 // The values of the enum are chosen to make this negation possible.
1880 LH = static_cast<Stmt::Likelihood>(-LH);
1881 // Negate the condition and swap the destination blocks.
1882 return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1883 FalseCount, LH);
1884 }
1885 }
1886
1887 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1888 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1889 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1890 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1891
1892 // The ConditionalOperator itself has no likelihood information for its
1893 // true and false branches. This matches the behavior of __builtin_expect.
1894 ConditionalEvaluation cond(*this);
1895 EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1896 getProfileCount(CondOp), Stmt::LH_None);
1897
1898 // When computing PGO branch weights, we only know the overall count for
1899 // the true block. This code is essentially doing tail duplication of the
1900 // naive code-gen, introducing new edges for which counts are not
1901 // available. Divide the counts proportionally between the LHS and RHS of
1902 // the conditional operator.
1903 uint64_t LHSScaledTrueCount = 0;
1904 if (TrueCount) {
1905 double LHSRatio =
1906 getProfileCount(CondOp) / (double)getCurrentProfileCount();
1907 LHSScaledTrueCount = TrueCount * LHSRatio;
1908 }
1909
1910 cond.begin(*this);
1911 EmitBlock(LHSBlock);
1912 incrementProfileCounter(CondOp);
1913 {
1914 ApplyDebugLocation DL(*this, Cond);
1915 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1916 LHSScaledTrueCount, LH, CondOp);
1917 }
1918 cond.end(*this);
1919
1920 cond.begin(*this);
1921 EmitBlock(RHSBlock);
1922 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1923 TrueCount - LHSScaledTrueCount, LH, CondOp);
1924 cond.end(*this);
1925
1926 return;
1927 }
1928
1929 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1930 // Conditional operator handling can give us a throw expression as a
1931 // condition for a case like:
1932 // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
1933 // Fold this to:
1934 // br(c, throw x, br(y, t, f))
1935 EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1936 return;
1937 }
1938
1939 // Emit the code with the fully general case.
1940 llvm::Value *CondV;
1941 {
1942 ApplyDebugLocation DL(*this, Cond);
1943 CondV = EvaluateExprAsBool(Cond);
1944 }
1945
1946 // If not at the top of the logical operator nest, update MCDC temp with the
1947 // boolean result of the evaluated condition.
1948 if (!MCDCLogOpStack.empty()) {
1949 const Expr *MCDCBaseExpr = Cond;
1950 // When a nested ConditionalOperator (ternary) is encountered in a boolean
1951 // expression, MC/DC tracks the result of the ternary, and this is tied to
1952 // the ConditionalOperator expression and not the ternary's LHS or RHS. If
1953 // this is the case, the ConditionalOperator expression is passed through
1954 // the ConditionalOp parameter and then used as the MCDC base expression.
1955 if (ConditionalOp)
1956 MCDCBaseExpr = ConditionalOp;
1957
1958 maybeUpdateMCDCCondBitmap(MCDCBaseExpr, CondV);
1959 }
1960
1961 llvm::MDNode *Weights = nullptr;
1962 llvm::MDNode *Unpredictable = nullptr;
1963
1964 // If the branch has a condition wrapped by __builtin_unpredictable,
1965 // create metadata that specifies that the branch is unpredictable.
1966 // Don't bother if not optimizing because that metadata would not be used.
1967 auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1968 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1969 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1970 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1971 llvm::MDBuilder MDHelper(getLLVMContext());
1972 Unpredictable = MDHelper.createUnpredictable();
1973 }
1974 }
1975
1976 // If there is likelihood knowledge for the condition, lower it.
1977 // Note that if not optimizing this won't emit anything.
1978 llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
1979 if (CondV != NewCondV)
1980 CondV = NewCondV;
1981 else {
1982 // Otherwise, lower profile counts. Note that we do this even at -O0.
1983 uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1984 Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
1985 }
1986
1987 Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1988}
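// Usage sketch (hypothetical): "if (x) [[likely]] ..." reaches the tail of
// this function with LH == LH_Likely and, at -O1 or above, routes the
// condition through @llvm.expect.i1; a plain "if (x)" built with profile
// data instead gets !prof branch weights, and a condition wrapped in
// __builtin_unpredictable gets !unpredictable metadata.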
1989
1990/// ErrorUnsupported - Print out an error that codegen doesn't support the
1991/// specified stmt yet.
1992void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1993 CGM.ErrorUnsupported(S, Type);
1994}
1995
1996/// emitNonZeroVLAInit - Emit the "zero" initialization of a
1997/// variable-length array whose elements have a non-zero bit-pattern.
1998///
1999/// \param baseType the inner-most element type of the array
2000/// \param src - a char* pointing to the bit-pattern for a single
2001/// base element of the array
2002/// \param sizeInChars - the total size of the VLA, in chars
2003 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
2004 Address dest, Address src,
2005 llvm::Value *sizeInChars) {
2006 CGBuilderTy &Builder = CGF.Builder;
2007
2008 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
2009 llvm::Value *baseSizeInChars
2010 = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
2011
2012 Address begin = dest.withElementType(CGF.Int8Ty);
2013 llvm::Value *end = Builder.CreateInBoundsGEP(
2014 begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
2015
2016 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
2017 llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
2018 llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
2019
2020 // Make a loop over the VLA. C99 guarantees that the VLA element
2021 // count must be nonzero.
2022 CGF.EmitBlock(loopBB);
2023
2024 llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
2025 cur->addIncoming(begin.getPointer(), originBB);
2026
2027 CharUnits curAlign =
2028 dest.getAlignment().alignmentOfArrayElement(baseSize);
2029
2030 // memcpy the individual element bit-pattern.
2031 Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
2032 /*volatile*/ false);
2033
2034 // Go to the next element.
2035 llvm::Value *next =
2036 Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
2037
2038 // Leave if that's the end of the VLA.
2039 llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
2040 Builder.CreateCondBr(done, contBB, loopBB);
2041 cur->addIncoming(next, loopBB);
2042
2043 CGF.EmitBlock(contBB);
2044}
2045
2046void
2047 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
2048 // Ignore empty classes in C++.
2049 if (getLangOpts().CPlusPlus) {
2050 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2051 if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
2052 return;
2053 }
2054 }
2055
2056 if (DestPtr.getElementType() != Int8Ty)
2057 DestPtr = DestPtr.withElementType(Int8Ty);
2058
2059 // Get size and alignment info for this aggregate.
2060 CharUnits size = getContext().getTypeSizeInChars(Ty);
2061
2062 llvm::Value *SizeVal;
2063 const VariableArrayType *vla;
2064
2065 // Don't bother emitting a zero-byte memset.
2066 if (size.isZero()) {
2067 // But note that getTypeInfo returns 0 for a VLA.
2068 if (const VariableArrayType *vlaType =
2069 dyn_cast_or_null<VariableArrayType>(
2070 getContext().getAsArrayType(Ty))) {
2071 auto VlaSize = getVLASize(vlaType);
2072 SizeVal = VlaSize.NumElts;
2073 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2074 if (!eltSize.isOne())
2075 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
2076 vla = vlaType;
2077 } else {
2078 return;
2079 }
2080 } else {
2081 SizeVal = CGM.getSize(size);
2082 vla = nullptr;
2083 }
2084
2085 // If the type contains a pointer to data member we can't memset it to zero.
2086 // Instead, create a null constant and copy it to the destination.
2087 // TODO: there are other patterns besides zero that we can usefully memset,
2088 // like -1, which happens to be the pattern used by member-pointers.
2089 if (!CGM.getTypes().isZeroInitializable(Ty)) {
2090 // For a VLA, emit a single element, then splat that over the VLA.
2091 if (vla) Ty = getContext().getBaseElementType(vla);
2092
2093 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
2094
2095 llvm::GlobalVariable *NullVariable =
2096 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2097 /*isConstant=*/true,
2098 llvm::GlobalVariable::PrivateLinkage,
2099 NullConstant, Twine());
2100 CharUnits NullAlign = DestPtr.getAlignment();
2101 NullVariable->setAlignment(NullAlign.getAsAlign());
2102 Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
2103
2104 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2105
2106 // Get and call the appropriate llvm.memcpy overload.
2107 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2108 return;
2109 }
2110
2111 // Otherwise, just memset the whole thing to zero. This is legal
2112 // because in LLVM, all default initializers (other than the ones we just
2113 // handled above) are guaranteed to have a bit pattern of all zeros.
2114 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2115}
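// The non-zero path above matters mainly for pointers to data members
// (hypothetical example): under the Itanium C++ ABI a null "int A::*" is the
// all-ones pattern -1, so zero-initializing
//
//   struct A { int x; };
//   struct S { int A::*p; } s{};
//
// must memcpy from a null constant instead of memsetting the storage to 0.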
2116
2117llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2118 // Make sure that there is a block for the indirect goto.
2119 if (!IndirectBranch)
2120 GetIndirectGotoBlock();
2121
2122 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2123
2124 // Make sure the indirect branch includes all of the address-taken blocks.
2125 IndirectBranch->addDestination(BB);
2126 return llvm::BlockAddress::get(CurFn, BB);
2127}
2128
2129llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2130 // If we already made the indirect branch for indirect goto, return its block.
2131 if (IndirectBranch) return IndirectBranch->getParent();
2132
2133 CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
2134
2135 // Create the PHI node that indirect gotos will add entries to.
2136 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2137 "indirect.goto.dest");
2138
2139 // Create the indirect branch instruction.
2140 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2141 return IndirectBranch->getParent();
2142}
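// These two functions back the GNU address-of-label extension (illustrative
// sketch):
//
//   void *dest = &&done;   // GetAddrOfLabel -> blockaddress(@f, %done)
//   goto *dest;            // routed through the shared "indirectgoto" block
//   done:;
//
// Each address-taken label is added as a destination of the single
// indirectbr, with the PHI above collecting the jump addresses.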
2143
2144/// Computes the length of an array in elements, as well as the base
2145/// element type and a properly-typed first element pointer.
2146llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2147 QualType &baseType,
2148 Address &addr) {
2149 const ArrayType *arrayType = origArrayType;
2150
2151 // If it's a VLA, we have to load the stored size. Note that
2152 // this is the size of the VLA in bytes, not its size in elements.
2153 llvm::Value *numVLAElements = nullptr;
2154 if (isa<VariableArrayType>(arrayType)) {
2155 numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
2156
2157 // Walk into all VLAs. This doesn't require changes to addr,
2158 // which has type T* where T is the first non-VLA element type.
2159 do {
2160 QualType elementType = arrayType->getElementType();
2161 arrayType = getContext().getAsArrayType(elementType);
2162
2163 // If we only have VLA components, 'addr' requires no adjustment.
2164 if (!arrayType) {
2165 baseType = elementType;
2166 return numVLAElements;
2167 }
2168 } while (isa<VariableArrayType>(arrayType));
2169
2170 // We get out here only if we find a constant array type
2171 // inside the VLA.
2172 }
2173
2174 // We have some number of constant-length arrays, so addr should
2175 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
2176 // down to the first element of addr.
2177 SmallVector<llvm::Value*, 8> gepIndices;
2178
2179 // GEP down to the array type.
2180 llvm::ConstantInt *zero = Builder.getInt32(0);
2181 gepIndices.push_back(zero);
2182
2183 uint64_t countFromCLAs = 1;
2184 QualType eltType;
2185
2186 llvm::ArrayType *llvmArrayType =
2187 dyn_cast<llvm::ArrayType>(addr.getElementType());
2188 while (llvmArrayType) {
2189 assert(isa<ConstantArrayType>(arrayType));
2190 assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
2191 == llvmArrayType->getNumElements());
2192
2193 gepIndices.push_back(zero);
2194 countFromCLAs *= llvmArrayType->getNumElements();
2195 eltType = arrayType->getElementType();
2196
2197 llvmArrayType =
2198 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2199 arrayType = getContext().getAsArrayType(arrayType->getElementType());
2200 assert((!llvmArrayType || arrayType) &&
2201 "LLVM and Clang types are out-of-synch");
2202 }
2203
2204 if (arrayType) {
2205 // From this point onwards, the Clang array type has been emitted
2206 // as some other type (probably a packed struct). Compute the array
2207 // size, and just emit the 'begin' expression as a bitcast.
2208 while (arrayType) {
2209 countFromCLAs *=
2210 cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
2211 eltType = arrayType->getElementType();
2212 arrayType = getContext().getAsArrayType(eltType);
2213 }
2214
2215 llvm::Type *baseType = ConvertType(eltType);
2216 addr = addr.withElementType(baseType);
2217 } else {
2218 // Create the actual GEP.
2219 addr = Address(Builder.CreateInBoundsGEP(
2220 addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
2221 ConvertTypeForMem(eltType),
2222 addr.getAlignment());
2223 }
2224
2225 baseType = eltType;
2226
2227 llvm::Value *numElements
2228 = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2229
2230 // If we had any VLA dimensions, factor them in.
2231 if (numVLAElements)
2232 numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2233
2234 return numElements;
2235}
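// For instance (illustrative): for "int a[2][3]" this returns the constant 6
// with baseType 'int' and addr GEP'd down to the first element; for the VLA
// "int a[n][3]" it returns n * 3, combining the recorded VLA count with the
// constant dimensions via NUW multiplies.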
2236
2237CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2238 const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2239 assert(vla && "type was not a variable array type!");
2240 return getVLASize(vla);
2241}
2242
2243CodeGenFunction::VlaSizePair
2244 CodeGenFunction::getVLASize(const VariableArrayType *type) {
2245 // The number of elements so far; always size_t.
2246 llvm::Value *numElements = nullptr;
2247
2248 QualType elementType;
2249 do {
2250 elementType = type->getElementType();
2251 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2252 assert(vlaSize && "no size for VLA!");
2253 assert(vlaSize->getType() == SizeTy);
2254
2255 if (!numElements) {
2256 numElements = vlaSize;
2257 } else {
2258 // It's undefined behavior if this wraps around, so mark it that way.
2259 // FIXME: Teach -fsanitize=undefined to trap this.
2260 numElements = Builder.CreateNUWMul(numElements, vlaSize);
2261 }
2262 } while ((type = getContext().getAsVariableArrayType(elementType)));
2263
2264 return { numElements, elementType };
2265}
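// For instance (illustrative): for "int a[n][m]" the size expressions were
// recorded in VLASizeMap when the type was emitted, so this returns the pair
// { n * m (NUW multiply), int }.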
2266
2267CodeGenFunction::VlaSizePair
2268 CodeGenFunction::getVLAElements1D(QualType type) {
2269 const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2270 assert(vla && "type was not a variable array type!");
2271 return getVLAElements1D(vla);
2272}
2273
2274CodeGenFunction::VlaSizePair
2275 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
2276 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2277 assert(VlaSize && "no size for VLA!");
2278 assert(VlaSize->getType() == SizeTy);
2279 return { VlaSize, Vla->getElementType() };
2280}
2281
2282 void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
2283 assert(type->isVariablyModifiedType() &&
2284 "Must pass variably modified type to EmitVLASizes!");
2285
2286 EnsureInsertPoint();
2287
2288 // We're going to walk down into the type and look for VLA
2289 // expressions.
2290 do {
2291 assert(type->isVariablyModifiedType());
2292
2293 const Type *ty = type.getTypePtr();
2294 switch (ty->getTypeClass()) {
2295
2296#define TYPE(Class, Base)
2297#define ABSTRACT_TYPE(Class, Base)
2298#define NON_CANONICAL_TYPE(Class, Base)
2299#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2300#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2301#include "clang/AST/TypeNodes.inc"
2302 llvm_unreachable("unexpected dependent type!");
2303
2304 // These types are never variably-modified.
2305 case Type::Builtin:
2306 case Type::Complex:
2307 case Type::Vector:
2308 case Type::ExtVector:
2309 case Type::ConstantMatrix:
2310 case Type::Record:
2311 case Type::Enum:
2312 case Type::Using:
2313 case Type::TemplateSpecialization:
2314 case Type::ObjCTypeParam:
2315 case Type::ObjCObject:
2316 case Type::ObjCInterface:
2317 case Type::ObjCObjectPointer:
2318 case Type::BitInt:
2319 llvm_unreachable("type class is never variably-modified!");
2320
2321 case Type::Elaborated:
2322 type = cast<ElaboratedType>(ty)->getNamedType();
2323 break;
2324
2325 case Type::Adjusted:
2326 type = cast<AdjustedType>(ty)->getAdjustedType();
2327 break;
2328
2329 case Type::Decayed:
2330 type = cast<DecayedType>(ty)->getPointeeType();
2331 break;
2332
2333 case Type::Pointer:
2334 type = cast<PointerType>(ty)->getPointeeType();
2335 break;
2336
2337 case Type::BlockPointer:
2338 type = cast<BlockPointerType>(ty)->getPointeeType();
2339 break;
2340
2341 case Type::LValueReference:
2342 case Type::RValueReference:
2343 type = cast<ReferenceType>(ty)->getPointeeType();
2344 break;
2345
2346 case Type::MemberPointer:
2347 type = cast<MemberPointerType>(ty)->getPointeeType();
2348 break;
2349
2350 case Type::ConstantArray:
2351 case Type::IncompleteArray:
2352 // Losing element qualification here is fine.
2353 type = cast<ArrayType>(ty)->getElementType();
2354 break;
2355
2356 case Type::VariableArray: {
2357 // Losing element qualification here is fine.
2358 const VariableArrayType *vat = cast<VariableArrayType>(ty);
2359
2360 // Unknown size indication requires no size computation.
2361 // Otherwise, evaluate and record it.
2362 if (const Expr *sizeExpr = vat->getSizeExpr()) {
2363 // It's possible that we might have emitted this already,
2364 // e.g. with a typedef and a pointer to it.
2365 llvm::Value *&entry = VLASizeMap[sizeExpr];
2366 if (!entry) {
2367 llvm::Value *size = EmitScalarExpr(sizeExpr);
2368
2369 // C11 6.7.6.2p5:
2370 // If the size is an expression that is not an integer constant
2371 // expression [...] each time it is evaluated it shall have a value
2372 // greater than zero.
2373 if (SanOpts.has(SanitizerKind::VLABound)) {
2374 SanitizerScope SanScope(this);
2375 llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2376 clang::QualType SEType = sizeExpr->getType();
2377 llvm::Value *CheckCondition =
2378 SEType->isSignedIntegerType()
2379 ? Builder.CreateICmpSGT(size, Zero)
2380 : Builder.CreateICmpUGT(size, Zero);
2381 llvm::Constant *StaticArgs[] = {
2382 EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2383 EmitCheckTypeDescriptor(SEType)};
2384 EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound),
2385 SanitizerHandler::VLABoundNotPositive, StaticArgs, size);
2386 }
2387
2388 // Always zexting here would be wrong if it weren't
2389 // undefined behavior to have a negative bound.
2390 // FIXME: What about when size's type is larger than size_t?
2391 entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2392 }
2393 }
2394 type = vat->getElementType();
2395 break;
2396 }
2397
2398 case Type::FunctionProto:
2399 case Type::FunctionNoProto:
2400 type = cast<FunctionType>(ty)->getReturnType();
2401 break;
2402
2403 case Type::Paren:
2404 case Type::TypeOf:
2405 case Type::UnaryTransform:
2406 case Type::Attributed:
2407 case Type::BTFTagAttributed:
2408 case Type::SubstTemplateTypeParm:
2409 case Type::MacroQualified:
2410 // Keep walking after single level desugaring.
2411 type = type.getSingleStepDesugaredType(getContext());
2412 break;
2413
2414 case Type::Typedef:
2415 case Type::Decltype:
2416 case Type::Auto:
2417 case Type::DeducedTemplateSpecialization:
2418 case Type::PackIndexing:
2419 // Stop walking: nothing to do.
2420 return;
2421
2422 case Type::TypeOfExpr:
2423 // Stop walking: emit typeof expression.
2424 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2425 return;
2426
2427 case Type::Atomic:
2428 type = cast<AtomicType>(ty)->getValueType();
2429 break;
2430
2431 case Type::Pipe:
2432 type = cast<PipeType>(ty)->getElementType();
2433 break;
2434 }
2435 } while (type->isVariablyModifiedType());
2436}
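// Usage sketch (hypothetical):
//
//   void g(int n) { typedef int T[n + 1]; T *p; }
//
// The walk above reaches the VariableArray case once, evaluates "n + 1", and
// caches it in VLASizeMap; with -fsanitize=vla-bound it also emits the
// size-is-positive check at that point. Later uses of T reuse the cached
// size.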
2437
2438 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2439 if (getContext().getBuiltinVaListType()->isArrayType())
2440 return EmitPointerWithAlignment(E);
2441 return EmitLValue(E).getAddress(*this);
2442}
2443
2444 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2445 return EmitLValue(E).getAddress(*this);
2446}
2447
2448 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2449 const APValue &Init) {
2450 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2451 if (CGDebugInfo *Dbg = getDebugInfo())
2452 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2453 Dbg->EmitGlobalVariable(E->getDecl(), Init);
2454}
2455
2456CodeGenFunction::PeepholeProtection
2457 CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2458 // At the moment, the only aggressive peephole we do in IR gen
2459 // is trunc(zext) folding, but if we add more, we can easily
2460 // extend this protection.
2461
2462 if (!rvalue.isScalar()) return PeepholeProtection();
2463 llvm::Value *value = rvalue.getScalarVal();
2464 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2465
2466 // Just make an extra bitcast.
2467 assert(HaveInsertPoint());
2468 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2469 Builder.GetInsertBlock());
2470
2471 PeepholeProtection protection;
2472 protection.Inst = inst;
2473 return protection;
2474}
2475
2476void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2477 if (!protection.Inst) return;
2478
2479 // In theory, we could try to duplicate the peepholes now, but whatever.
2480 protection.Inst->eraseFromParent();
2481}
2482
2483void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2484 QualType Ty, SourceLocation Loc,
2485 SourceLocation AssumptionLoc,
2486 llvm::Value *Alignment,
2487 llvm::Value *OffsetValue) {
2488 if (Alignment->getType() != IntPtrTy)
2489 Alignment =
2490 Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2491 if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2492 OffsetValue =
2493 Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2494 llvm::Value *TheCheck = nullptr;
2495 if (SanOpts.has(SanitizerKind::Alignment)) {
2496 llvm::Value *PtrIntValue =
2497 Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2498
2499 if (OffsetValue) {
2500 bool IsOffsetZero = false;
2501 if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2502 IsOffsetZero = CI->isZero();
2503
2504 if (!IsOffsetZero)
2505 PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2506 }
2507
2508 llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2509 llvm::Value *Mask =
2510 Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2511 llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2512 TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2513 }
2514 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2515 CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2516
2517 if (!SanOpts.has(SanitizerKind::Alignment))
2518 return;
2519 emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2520 OffsetValue, TheCheck, Assumption);
2521}
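// Usage sketch (hypothetical): "q = __builtin_assume_aligned(p, 32)" lands
// here with Alignment == 32 and emits roughly
//
//   call void @llvm.assume(i1 true) [ "align"(ptr %p, i64 32) ]
//
// preceded, under -fsanitize=alignment, by the (ptr & 31) == 0 check built
// above.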
2522
2523void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2524 const Expr *E,
2525 SourceLocation AssumptionLoc,
2526 llvm::Value *Alignment,
2527 llvm::Value *OffsetValue) {
2528 QualType Ty = E->getType();
2529 SourceLocation Loc = E->getExprLoc();
2530
2531 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2532 OffsetValue);
2533}
2534
2535llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2536 llvm::Value *AnnotatedVal,
2537 StringRef AnnotationStr,
2538 SourceLocation Location,
2539 const AnnotateAttr *Attr) {
2540 SmallVector<llvm::Value *, 5> Args = {
2541 AnnotatedVal,
2542 CGM.EmitAnnotationString(AnnotationStr),
2543 CGM.EmitAnnotationUnit(Location),
2544 CGM.EmitAnnotationLineNo(Location),
2545 };
2546 if (Attr)
2547 Args.push_back(CGM.EmitAnnotationArgs(Attr));
2548 return Builder.CreateCall(AnnotationFn, Args);
2549}
2550
2551void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2552 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2553 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2554 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2555 {V->getType(), CGM.ConstGlobalsPtrTy}),
2556 V, I->getAnnotation(), D->getLocation(), I);
2557}
2558
2559 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2560 Address Addr) {
2561 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2562 llvm::Value *V = Addr.getPointer();
2563 llvm::Type *VTy = V->getType();
2564 auto *PTy = dyn_cast<llvm::PointerType>(VTy);
2565 unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2566 llvm::PointerType *IntrinTy =
2567 llvm::PointerType::get(CGM.getLLVMContext(), AS);
2568 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2569 {IntrinTy, CGM.ConstGlobalsPtrTy});
2570
2571 for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2572 // FIXME Always emit the cast inst so we can differentiate between
2573 // annotation on the first field of a struct and annotation on the struct
2574 // itself.
2575 if (VTy != IntrinTy)
2576 V = Builder.CreateBitCast(V, IntrinTy);
2577 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2578 V = Builder.CreateBitCast(V, VTy);
2579 }
2580
2581 return Address(V, Addr.getElementType(), Addr.getAlignment());
2582}
2583
2584 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2585
2586 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2587 : CGF(CGF) {
2588 assert(!CGF->IsSanitizerScope);
2589 CGF->IsSanitizerScope = true;
2590}
2591
2593 CGF->IsSanitizerScope = false;
2594}
2595
2596void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2597 const llvm::Twine &Name,
2598 llvm::BasicBlock *BB,
2599 llvm::BasicBlock::iterator InsertPt) const {
2600 LoopStack.InsertHelper(I);
2601 if (IsSanitizerScope)
2602 I->setNoSanitizeMetadata();
2603}
2604
2605 void CGBuilderInserter::InsertHelper(
2606 llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2607 llvm::BasicBlock::iterator InsertPt) const {
2608 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2609 if (CGF)
2610 CGF->InsertHelper(I, Name, BB, InsertPt);
2611}
2612
2613// Emits an error if we don't have a valid set of target features for the
2614// called function.
2615 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2616 const FunctionDecl *TargetDecl) {
2617 // SemaChecking cannot handle the x86 builtins below because their valid
2618 // parameter ranges depend on the TargetAttr of the caller.
2619 if (CGM.getContext().getTargetInfo().getTriple().isX86()) {
2620 unsigned BuiltinID = TargetDecl->getBuiltinID();
2621 if (BuiltinID == X86::BI__builtin_ia32_cmpps ||
2622 BuiltinID == X86::BI__builtin_ia32_cmpss ||
2623 BuiltinID == X86::BI__builtin_ia32_cmppd ||
2624 BuiltinID == X86::BI__builtin_ia32_cmpsd) {
2625 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2626 llvm::StringMap<bool> TargetFeatureMap;
2627 CGM.getContext().getFunctionFeatureMap(TargetFeatureMap, FD);
2628 llvm::APSInt Result =
2629 *(E->getArg(2)->getIntegerConstantExpr(CGM.getContext()));
2630 if (Result.getSExtValue() > 7 && !TargetFeatureMap.lookup("avx"))
2631 CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
2632 << TargetDecl->getDeclName() << "avx";
2633 }
2634 }
2635 return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2636}
2637
2638// Emits an error if we don't have a valid set of target features for the
2639// called function.
2640 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2641 const FunctionDecl *TargetDecl) {
2642 // Early exit if this is an indirect call.
2643 if (!TargetDecl)
2644 return;
2645
2646 // Get the current enclosing function if it exists. If it doesn't
2647 // we can't check the target features anyhow.
2648 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2649 if (!FD)
2650 return;
2651
2652 // Grab the required features for the call. For a builtin this is listed in
2653 // the .td file with the default CPU; for an always_inline function it is any
2654 // listed CPU and any listed features.
2655 unsigned BuiltinID = TargetDecl->getBuiltinID();
2656 std::string MissingFeature;
2657 llvm::StringMap<bool> CallerFeatureMap;
2658 CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2659 // When compiling in HipStdPar mode we have to be conservative in rejecting
2660 // target specific features in the FE, and defer the possible error to the
2661 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
2662 // referenced by an accelerator executable function, we emit an error.
2663 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2664 if (BuiltinID) {
2665 StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
2666 if (!Builtin::evaluateRequiredTargetFeatures(
2667 FeatureList, CallerFeatureMap) && !IsHipStdPar) {
2668 CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2669 << TargetDecl->getDeclName()
2670 << FeatureList;
2671 }
2672 } else if (!TargetDecl->isMultiVersion() &&
2673 TargetDecl->hasAttr<TargetAttr>()) {
2674 // Get the required features for the callee.
2675
2676 const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2677 ParsedTargetAttr ParsedAttr =
2678 CGM.getContext().filterFunctionTargetAttrs(TD);
2679
2680 SmallVector<StringRef, 1> ReqFeatures;
2681 llvm::StringMap<bool> CalleeFeatureMap;
2682 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2683
2684 for (const auto &F : ParsedAttr.Features) {
2685 if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2686 ReqFeatures.push_back(StringRef(F).substr(1));
2687 }
2688
2689 for (const auto &F : CalleeFeatureMap) {
2690 // Only positive features are "required".
2691 if (F.getValue())
2692 ReqFeatures.push_back(F.getKey());
2693 }
2694 if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
2695 if (!CallerFeatureMap.lookup(Feature)) {
2696 MissingFeature = Feature.str();
2697 return false;
2698 }
2699 return true;
2700 }) && !IsHipStdPar)
2701 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2702 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2703 } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
2704 llvm::StringMap<bool> CalleeFeatureMap;
2705 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2706
2707 for (const auto &F : CalleeFeatureMap) {
2708 if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) ||
2709 !CallerFeatureMap.find(F.getKey())->getValue()) &&
2710 !IsHipStdPar)
2711 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2712 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2713 }
2714 }
2715}
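// For instance (illustrative): calling an AVX builtin such as
// __builtin_ia32_addps256 from a function whose feature set lacks avx takes
// the BuiltinID path above and reports err_builtin_needs_feature, while
// calling an always_inline callee marked __attribute__((target("avx2")))
// from a plain caller reports err_function_needs_feature.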
2716
2717void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2718 if (!CGM.getCodeGenOpts().SanitizeStats)
2719 return;
2720
2721 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2722 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2723 CGM.getSanStats().create(IRB, SSK);
2724}
2725
2726 void CodeGenFunction::EmitKCFIOperandBundle(
2727 const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2728 const FunctionProtoType *FP =
2729 Callee.getAbstractInfo().getCalleeFunctionProtoType();
2730 if (FP)
2731 Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar()));
2732}
2733
2734llvm::Value *CodeGenFunction::FormAArch64ResolverCondition(
2735 const MultiVersionResolverOption &RO) {
2736 llvm::SmallVector<StringRef, 8> CondFeatures;
2737 for (const StringRef &Feature : RO.Conditions.Features) {
2738 // Form condition for features which are not yet enabled in target
2739 if (!getContext().getTargetInfo().hasFeature(Feature))
2740 CondFeatures.push_back(Feature);
2741 }
2742 if (!CondFeatures.empty()) {
2743 return EmitAArch64CpuSupports(CondFeatures);
2744 }
2745 return nullptr;
2746}
2747
2748llvm::Value *CodeGenFunction::FormX86ResolverCondition(
2749 const MultiVersionResolverOption &RO) {
2750 llvm::Value *Condition = nullptr;
2751
2752 if (!RO.Conditions.Architecture.empty()) {
2753 StringRef Arch = RO.Conditions.Architecture;
2754 // If arch= specifies an x86-64 micro-architecture level, test the feature
2755 // with __builtin_cpu_supports, otherwise use __builtin_cpu_is.
2756 if (Arch.starts_with("x86-64"))
2757 Condition = EmitX86CpuSupports({Arch});
2758 else
2759 Condition = EmitX86CpuIs(Arch);
2760 }
2761
2762 if (!RO.Conditions.Features.empty()) {
2763 llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2764 Condition =
2765 Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2766 }
2767 return Condition;
2768}
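// For instance (illustrative): a version declared
// __attribute__((target("arch=x86-64-v3"))) is tested with
// __builtin_cpu_supports("x86-64-v3"), while one declared
// __attribute__((target("arch=sandybridge,popcnt"))) combines
// __builtin_cpu_is("sandybridge") with cpu_supports("popcnt") via an AND.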
2769
2770 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2771 llvm::Function *Resolver,
2772 CGBuilderTy &Builder,
2773 llvm::Function *FuncToReturn,
2774 bool SupportsIFunc) {
2775 if (SupportsIFunc) {
2776 Builder.CreateRet(FuncToReturn);
2777 return;
2778 }
2779
2780 SmallVector<llvm::Value *, 10> Args(
2781 llvm::make_pointer_range(Resolver->args()));
2782
2783 llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2784 Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2785
2786 if (Resolver->getReturnType()->isVoidTy())
2787 Builder.CreateRetVoid();
2788 else
2789 Builder.CreateRet(Result);
2790}
2791
2792 void CodeGenFunction::EmitMultiVersionResolver(
2793 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2794
2795 llvm::Triple::ArchType ArchType =
2796 getContext().getTargetInfo().getTriple().getArch();
2797
2798 switch (ArchType) {
2799 case llvm::Triple::x86:
2800 case llvm::Triple::x86_64:
2801 EmitX86MultiVersionResolver(Resolver, Options);
2802 return;
2803 case llvm::Triple::aarch64:
2804 EmitAArch64MultiVersionResolver(Resolver, Options);
2805 return;
2806
2807 default:
2808 assert(false && "Only implemented for x86 and AArch64 targets");
2809 }
2810}
2811
2812 void CodeGenFunction::EmitAArch64MultiVersionResolver(
2813 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2814 assert(!Options.empty() && "No multiversion resolver options found");
2815 assert(Options.back().Conditions.Features.size() == 0 &&
2816 "Default case must be last");
2817 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2818 assert(SupportsIFunc &&
2819 "Multiversion resolver requires target IFUNC support");
2820 bool AArch64CpuInitialized = false;
2821 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2822
2823 for (const MultiVersionResolverOption &RO : Options) {
2824 Builder.SetInsertPoint(CurBlock);
2825 llvm::Value *Condition = FormAArch64ResolverCondition(RO);
2826
2827 // The 'default' or 'all features enabled' case.
2828 if (!Condition) {
2829 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2830 SupportsIFunc);
2831 return;
2832 }
2833
2834 if (!AArch64CpuInitialized) {
2835 Builder.SetInsertPoint(CurBlock, CurBlock->begin());
2836 EmitAArch64CpuInit();
2837 AArch64CpuInitialized = true;
2838 Builder.SetInsertPoint(CurBlock);
2839 }
2840
2841 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2842 CGBuilderTy RetBuilder(*this, RetBlock);
2843 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2844 SupportsIFunc);
2845 CurBlock = createBasicBlock("resolver_else", Resolver);
2846 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2847 }
2848
2849 // If no default, emit an unreachable.
2850 Builder.SetInsertPoint(CurBlock);
2851 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2852 TrapCall->setDoesNotReturn();
2853 TrapCall->setDoesNotThrow();
2854 Builder.CreateUnreachable();
2855 Builder.ClearInsertionPoint();
2856}
2857
2858 void CodeGenFunction::EmitX86MultiVersionResolver(
2859 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2860
2861 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2862
2863 // Main function's basic block.
2864 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2865 Builder.SetInsertPoint(CurBlock);
2866 EmitX86CpuInit();
2867
2868 for (const MultiVersionResolverOption &RO : Options) {
2869 Builder.SetInsertPoint(CurBlock);
2870 llvm::Value *Condition = FormX86ResolverCondition(RO);
2871
2872 // The 'default' or 'generic' case.
2873 if (!Condition) {
2874 assert(&RO == Options.end() - 1 &&
2875 "Default or Generic case must be last");
2876 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2877 SupportsIFunc);
2878 return;
2879 }
2880
2881 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2882 CGBuilderTy RetBuilder(*this, RetBlock);
2883 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2884 SupportsIFunc);
2885 CurBlock = createBasicBlock("resolver_else", Resolver);
2886 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2887 }
2888
2889 // If no generic/default, emit an unreachable.
2890 Builder.SetInsertPoint(CurBlock);
2891 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2892 TrapCall->setDoesNotReturn();
2893 TrapCall->setDoesNotThrow();
2894 Builder.CreateUnreachable();
2895 Builder.ClearInsertionPoint();
2896}
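// Resolver sketch (hypothetical) for
// __attribute__((target_clones("avx2", "default"))) int f(void);
//
//   resolver_entry:  call __cpu_indicator_init(), then test the avx2 bit
//   resolver_return: ret ptr @f.avx2 (a musttail call instead, sans IFUNC)
//   resolver_else:   ret ptr @f.default
//
// matching the block names created above.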
2897
2898// Loc - where the diagnostic will point, where in the source code this
2899// alignment has failed.
2900// SecondaryLoc - if present (will be present if sufficiently different from
2901// Loc), the diagnostic will additionally point a "Note:" to this location.
2902// It should be the location where the __attribute__((assume_aligned))
2903 // was written.
2904 void CodeGenFunction::emitAlignmentAssumptionCheck(
2905 llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2906 SourceLocation SecondaryLoc, llvm::Value *Alignment,
2907 llvm::Value *OffsetValue, llvm::Value *TheCheck,
2908 llvm::Instruction *Assumption) {
2909 assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2910 cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2911 llvm::Intrinsic::getDeclaration(
2912 Builder.GetInsertBlock()->getParent()->getParent(),
2913 llvm::Intrinsic::assume) &&
2914 "Assumption should be a call to llvm.assume().");
2915 assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2916 "Assumption should be the last instruction of the basic block, "
2917 "since the basic block is still being generated.");
2918
2919 if (!SanOpts.has(SanitizerKind::Alignment))
2920 return;
2921
2922 // Don't check pointers to volatile data. The behavior here is implementation-
2923 // defined.
2924 if (Ty->isVolatileQualified())
2925 return;
2926
2927 // We need to temporarily remove the assumption so we can insert the
2928 // sanitizer check before it; otherwise the check will be dropped by optimizations.
2929 Assumption->removeFromParent();
2930
2931 {
2932 SanitizerScope SanScope(this);
2933
2934 if (!OffsetValue)
2935 OffsetValue = Builder.getInt1(false); // no offset.
2936
2937 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2938 EmitCheckSourceLocation(SecondaryLoc),
2939 EmitCheckTypeDescriptor(Ty)};
2940 llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2941 EmitCheckValue(Alignment),
2942 EmitCheckValue(OffsetValue)};
2943 EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2944 SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2945 }
2946
2947 // We are now in the (new, empty) "cont" basic block.
2948 // Reintroduce the assumption.
2949 Builder.Insert(Assumption);
2950 // FIXME: The assumption still has its original basic block as its parent.
2951}
2952
2953 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2954 if (CGDebugInfo *DI = getDebugInfo())
2955 return DI->SourceLocToDebugLoc(Location);
2956
2957 return llvm::DebugLoc();
2958}
2959
2960llvm::Value *
2961CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
2962 Stmt::Likelihood LH) {
2963 switch (LH) {
2964 case Stmt::LH_None:
2965 return Cond;
2966 case Stmt::LH_Likely:
2967 case Stmt::LH_Unlikely:
2968 // Don't generate llvm.expect on -O0 as the backend won't use it for
2969 // anything.
2970 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2971 return Cond;
2972 llvm::Type *CondTy = Cond->getType();
2973 assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
2974 llvm::Function *FnExpect =
2975 CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
2976 llvm::Value *ExpectedValueOfCond =
2977 llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
2978 return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
2979 Cond->getName() + ".expval");
2980 }
2981 llvm_unreachable("Unknown Likelihood");
2982}
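// For instance (illustrative): with LH == LH_Likely at -O1 this rewrites an
// i1 %cond into
//
//   %cond.expval = call i1 @llvm.expect.i1(i1 %cond, i1 true)
//
// while at -O0 the condition is returned unchanged.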
2983
2984llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
2985 unsigned NumElementsDst,
2986 const llvm::Twine &Name) {
2987 auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
2988 unsigned NumElementsSrc = SrcTy->getNumElements();
2989 if (NumElementsSrc == NumElementsDst)
2990 return SrcVec;
2991
2992 std::vector<int> ShuffleMask(NumElementsDst, -1);
2993 for (unsigned MaskIdx = 0;
2994 MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
2995 ShuffleMask[MaskIdx] = MaskIdx;
2996
2997 return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
2998}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3259
This file provides some common utility functions for processing Lambda related AST Constructs.
StringRef P
Defines enum values for all the target-independent builtin functions.
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, llvm::Function *Resolver, CGBuilderTy &Builder, llvm::Function *FuncToReturn, bool SupportsIFunc)
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, Address dest, Address src, llvm::Value *sizeInChars)
emitNonZeroVLAInit - Emit the "zero" initialization of a variable-length array whose elements have a ...
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB)
static void TryMarkNoThrow(llvm::Function *F)
Tries to mark the given function nounwind based on the non-existence of any throwing calls within it.
static llvm::Constant * getPrologueSignature(CodeGenModule &CGM, const FunctionDecl *FD)
Return the UBSan prologue signature for FD if one is available.
static bool endsWithReturn(const Decl *F)
Determine whether the function F ends with a return stmt.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts, const LangOptions &LangOpts)
shouldEmitLifetimeMarkers - Decide whether we need emit the life-time markers.
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
llvm::MachO::Target Target
Definition: MachO.h:40
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
Definition: Module.cpp:100
Defines the Objective-C statement AST node classes.
Enumerates target-specific builtins in their own namespaces within namespace clang.
__device__ double
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD) const
Parses the target attributes passed in, and returns only the ones that are valid feature names.
CanQualType VoidPtrTy
Definition: ASTContext.h:1113
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:641
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const
Get a function type and produce the equivalent function type with the specified exception specificati...
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2745
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:752
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3147
QualType getElementType() const
Definition: Type.h:3159
Attr - This represents one attribute.
Definition: Attr.h:42
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3834
static bool isLogicalOp(Opcode Opc)
Definition: Expr.h:3966
const char * getRequiredFeatures(unsigned ID) const
Definition: Builtins.h:255
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2528
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2053
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
Definition: DeclCXX.cpp:2460
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition: DeclCXX.h:2179
QualType getThisType() const
Return the type of the this pointer.
Definition: DeclCXX.cpp:2563
bool isStatic() const
Definition: DeclCXX.cpp:2185
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
bool isLambda() const
Determine whether this class describes a lambda function object.
Definition: DeclCXX.h:1021
void getCaptureFields(llvm::DenseMap< const ValueDecl *, FieldDecl * > &Captures, FieldDecl *&ThisCapture) const
For a closure type, retrieve the mapping from captured variables and this to the non-static data memb...
Definition: DeclCXX.cpp:1640
bool isCapturelessLambda() const
Definition: DeclCXX.h:1067
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1192
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2819
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition: Expr.h:3010
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.cpp:1618
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:125
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
std::string SampleProfileFile
Name of the profile file to use with -fprofile-sample-use.
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
XRayInstrSet XRayInstrumentationBundle
Set of XRay instrumentation kinds to emit.
bool hasSanitizeCoverage() const
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
bool hasSanitizeBinaryMetadata() const
unsigned getInAllocaFieldIndex() const
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CharUnits getIndirectAlign() const
An aligned address.
Definition: Address.h:29
static Address invalid()
Definition: Address.h:46
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:78
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:62
Address withElementType(llvm::Type *ElemTy) const
Return an address with a different element type, but the same pointer and alignment.
Definition: Address.h:100
llvm::Value * getPointer() const
Definition: Address.h:51
bool isValid() const
Definition: Address.h:47
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:57
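A short sketch of the Address invariants documented above (Address.h is an internal CodeGen header, so this only compiles inside clang's CodeGen library; the helper name is hypothetical): withElementType() re-types the slot without touching the pointer or the alignment.

    #include "Address.h" // clang/lib/CodeGen internal header
    #include <cassert>

    clang::CodeGen::Address asByteAddress(clang::CodeGen::Address Addr,
                                          llvm::Type *I8Ty) {
      assert(Addr.isValid() && "check against Address::invalid() first");
      clang::CodeGen::Address Bytes = Addr.withElementType(I8Ty);
      // Same pointer, same alignment; only the element type differs.
      assert(Bytes.getPointer() == Addr.getPointer());
      assert(Bytes.getAlignment() == Addr.getAlignment());
      return Bytes;
    }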
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:823
static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, SourceLocation TemporaryLocation)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:870
This is an IRBuilder insertion helper that forwards to CodeGenFunction::InsertHelper,...
Definition: CGBuilder.h:26
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const override
This forwards to CodeGenFunction::InsertHelper.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:97
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:326
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:172
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:297
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:89
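The CGBuilder wrappers above take Address rather than raw pointers, so alignment is threaded through automatically. A hedged sketch (helper name and field layout are hypothetical), assuming a live CodeGenFunction inside clang's CodeGen library:

    void initAggregate(clang::CodeGen::CodeGenFunction &CGF,
                       clang::CodeGen::Address Agg, llvm::Value *SizeInBytes,
                       llvm::Value *Flag) {
      // Clear the whole object, padding included ...
      CGF.Builder.CreateMemSet(Agg, CGF.Builder.getInt8(0), SizeInBytes,
                               /*IsVolatile=*/false);
      // ... then overwrite field 1 with the provided value.
      clang::CodeGen::Address Field1 = CGF.Builder.CreateStructGEP(Agg, 1);
      CGF.Builder.CreateStore(Flag, Field1);
    }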
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args)=0
Emits a kernel launch stub.
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:135
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
Definition: CGCXXABI.h:127
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF)=0
Emit the ABI-specific prolog for the function.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:162
void buildThisParam(CodeGenFunction &CGF, FunctionArgList &Params)
Build a parameter variable suitable for 'this'.
Definition: CGCXXABI.cpp:118
virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params)=0
Insert any ABI-specific implicit parameters into the parameter list for a function.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:117
All available information about a concrete callee.
Definition: CGCall.h:62
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:55
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
CanQualType getReturnType() const
unsigned getMaxVectorWidth() const
Return the maximum vector width in the arguments.
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
void emitEntryFunction(const FunctionDecl *FD, llvm::Function *Fn)
virtual void functionFinished(CodeGenFunction &CGF)
Cleans up references to the objects in finished function.
llvm::OpenMPIRBuilder & getOMPBuilder()
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D)
Emits OpenMP-specific function prolog.
CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitDestructorBody(FunctionArgList &Args)
void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount=0, Stmt::Likelihood LH=Stmt::LH_None, const Expr *CntrIdx=nullptr)
EmitBranchToCounterBlock - Emit a conditional branch to a new block that increments a profile counter...
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null. If the type contains...
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Emit a check that V is the address of storage of the appropriate size and alignment for an object of ...
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void unprotectFromPeepholes(PeepholeProtection protection)
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
static bool hasScalarEvaluationKind(QualType T)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
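A sketch building on getVLASize(): VlaSizePair bundles the flattened element count (NumElts) with the base element type, so the byte size of a VLA is that count times the element size. Assumes a live CodeGenFunction; the helper name is hypothetical.

    llvm::Value *emitVLAByteSize(clang::CodeGen::CodeGenFunction &CGF,
                                 const clang::VariableArrayType *VAT) {
      auto VlaSize = CGF.getVLASize(VAT);
      clang::CharUnits EltSize =
          CGF.getContext().getTypeSizeInChars(VlaSize.Type);
      // NumElts is already the product over all variable dimensions.
      return CGF.Builder.CreateNUWMul(
          VlaSize.NumElts,
          llvm::ConstantInt::get(VlaSize.NumElts->getType(),
                                 EltSize.getQuantity()));
    }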
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
void EmitAArch64MultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
const LangOptions & getLangOpts() const
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
void EmitX86MultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
void EmitFunctionBody(const Stmt *Body)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
@ TCK_ConstructorCall
Checking the 'this' pointer for a constructor call.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
void setCurrentProfileCount(uint64_t Count)
Set the profiler's current count.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
static const Expr * stripCond(const Expr *C)
Ignore parentheses and logical-NOT to track conditions consistently.
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we're intending to store to the side, but which will prob...
const TargetInfo & getTarget() const
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
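A hedged sketch of how the sanitizer entries above fit together (the helper is hypothetical, not the actual UBSan null-check emission): pair the passing predicate with its sanitizer mask, supply static descriptor arguments, and let EmitCheck() build the trap-or-handler block.

    void emitNonNullCheck(clang::CodeGen::CodeGenFunction &CGF,
                          llvm::Value *Ptr, clang::QualType PtrTy,
                          clang::SourceLocation Loc) {
      llvm::Value *NonNull = CGF.Builder.CreateIsNotNull(Ptr);
      llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                      CGF.EmitCheckTypeDescriptor(PtrTy)};
      CGF.EmitCheck(std::make_pair(NonNull, clang::SanitizerKind::Null),
                    clang::CodeGen::SanitizerHandler::TypeMismatch,
                    StaticArgs, CGF.EmitCheckValue(Ptr));
    }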
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
bool ShouldSkipSanitizerInstrumentation()
ShouldSkipSanitizerInstrumentation - Return true if the current function should not be instrumented w...
uint64_t getCurrentProfileCount()
Get the profiler's current count.
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
Address EmitVAListRef(const Expr *E)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
void maybeCreateMCDCCondBitmap()
Allocate a temp value on the stack that MCDC can use to track condition results.
Address CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignTempAlloca - This creates an alloca with the default ABI alignment of the given L...
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
Address NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
void EmitConstructorBody(FunctionArgList &Args)
void SetFastMathFlags(FPOptions FPFeatures)
Set the codegen fast-math flags.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD)
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
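ConstantFoldsToSimpleInteger() and EmitBranchOnBoolExpr() are typically used together; a minimal sketch of that pattern (helper name hypothetical), assuming both target blocks were made with createBasicBlock():

    void branchOnCondition(clang::CodeGen::CodeGenFunction &CGF,
                           const clang::Expr *Cond,
                           llvm::BasicBlock *TrueBB,
                           llvm::BasicBlock *FalseBB) {
      bool CondConstant;
      // If the condition folds, emit an unconditional branch ...
      if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
        CGF.EmitBranch(CondConstant ? TrueBB : FalseBB);
        return;
      }
      // ... otherwise evaluate it and branch, with no profile weight.
      CGF.EmitBranchOnBoolExpr(Cond, TrueBB, FalseBB, /*TrueCount=*/0);
    }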
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
JumpDest getJumpDestForLabel(const LabelDecl *S)
Return the LLVM basic block that the specified label maps to.
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::Type * ConvertType(QualType T)
CodeGenTypes & getTypes() const
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Saved parameter Decls for the coroutine.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Address CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters.
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CGFunctionInfo * CurFnInfo
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs=std::nullopt)
EmitStmt - Emit the code for the statement.
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
bool checkIfFunctionMustProgress()
Returns true if a function must make progress, which means the mustprogress attribute can be added.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
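A sketch of the PGO counter flow implied by the profiling entries in this list (incrementProfileCounter, getProfileCount, setCurrentProfileCount); the helper is hypothetical:

    void noteRegionEntered(clang::CodeGen::CodeGenFunction &CGF,
                           const clang::Stmt *S) {
      // Bump the instrumentation counter attached to this statement ...
      CGF.incrementProfileCounter(S);
      // ... and make its recorded count the current execution count.
      CGF.setCurrentProfileCount(CGF.getProfileCount(S));
    }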
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::BasicBlock * GetIndirectGotoBlock()
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
This class organizes the cross-function state that is used while generating LLVM code.
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::Constant * EmitAnnotationArgs(const AnnotateAttr *Attr)
Emit additional args of the annotation.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
llvm::Constant * EmitAnnotationLineNo(SourceLocation L)
Emit the annotation line number.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
bool imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc, StringRef Category=StringRef()) const
Imbue XRay attributes to a function, applying the always/never attribute lists in the process.
ProfileList::ExclusionType isFunctionBlockedFromProfileInstr(llvm::Function *Fn, SourceLocation Loc) const
ASTContext & getContext() const
llvm::SanitizerStatReport & getSanStats()
llvm::Constant * EmitAnnotationString(StringRef Str)
Emit an annotation string.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
void GenKernelArgMetadata(llvm::Function *FN, const FunctionDecl *FD=nullptr, CodeGenFunction *CGF=nullptr)
OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument information in the program executab...
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
llvm::ConstantInt * CreateKCFITypeId(QualType T)
Generate a KCFI type identifier for T.
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
Definition: CGCall.cpp:1814
llvm::Constant * EmitAnnotationUnit(SourceLocation Loc)
Emit the annotation's translation unit.
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:318
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM z...
llvm::Type * ConvertTypeForMem(QualType T, bool ForBitField=false)
ConvertTypeForMem - Convert type T into a llvm::Type.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
bool containsOnlyLifetimeMarkers(stable_iterator Old) const
Definition: CGCleanup.cpp:144
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:359
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:352
LValue - This represents an lvalue reference.
Definition: CGValue.h:171
static LValue MakeAddr(Address address, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:417
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:350
void InsertHelper(llvm::Instruction *I) const
Function called by the CodeGenFunction when an instruction is created.
Definition: CGLoopInfo.cpp:829
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:39
bool isScalar() const
Definition: CGValue.h:54
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:61
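A sketch tying RValue to the LValue machinery listed earlier (MakeAddrLValue, EmitLoadOfLValue); the helper is hypothetical and assumes Ty has scalar evaluation kind:

    llvm::Value *loadScalar(clang::CodeGen::CodeGenFunction &CGF,
                            clang::CodeGen::Address Addr, clang::QualType Ty,
                            clang::SourceLocation Loc) {
      clang::CodeGen::LValue LV = CGF.MakeAddrLValue(Addr, Ty);
      clang::CodeGen::RValue RV = CGF.EmitLoadOfLValue(LV, Loc);
      assert(RV.isScalar() && "expected a scalar evaluation kind");
      return RV.getScalarVal();
    }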
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition: TargetInfo.h:212
void Init(const Stmt *Body)
Clear the object and pre-process the given statement, usually a function body statement.
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1604
ConditionalOperator - The ?: ternary operator.
Definition: Expr.h:4173
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1260
ValueDecl * getDecl()
Definition: Expr.h:1328
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:85
T * getAttr() const
Definition: DeclBase.h:578
ASTContext & getASTContext() const LLVM_READONLY
Definition: DeclBase.cpp:501
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
Definition: DeclBase.cpp:1174
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:564
SourceLocation getLocation() const
Definition: DeclBase.h:444
bool hasAttr() const
Definition: DeclBase.h:582
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1547
bool isIgnored(unsigned DiagID, SourceLocation Loc) const
Determine whether the diagnostic is known to be ignored.
Definition: Diagnostic.h:916
This represents one expression.
Definition: Expr.h:110
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Returns the set of floating point options that apply to this expression.
Definition: Expr.cpp:3832
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3041
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3025
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
std::optional< llvm::APSInt > getIntegerConstantExpr(const ASTContext &Ctx, SourceLocation *Loc=nullptr) const
Return the value if this expression is a valid integer constant expression.
QualType getType() const
Definition: Expr.h:142
ExtVectorType - Extended vector type.
Definition: Type.h:3604
LangOptions::FPExceptionModeKind getExceptionMode() const
Definition: LangOptions.h:818
bool allowFPContractAcrossStatement() const
Definition: LangOptions.h:793
RoundingMode getRoundingMode() const
Definition: LangOptions.h:806
Represents a member of a struct/union/class.
Definition: Decl.h:3025
Represents a function declaration or definition.
Definition: Decl.h:1959
bool isMultiVersion() const
True if this function is considered a multiversioned function.
Definition: Decl.h:2574
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition: Decl.cpp:3201
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3597
bool UsesFPIntrin() const
Determine whether the function was declared in source context that requires constrained FP intrinsics...
Definition: Decl.h:2786
bool usesSEHTry() const
Indicates the function uses __try.
Definition: Decl.h:2455
QualType getReturnType() const
Definition: Decl.h:2722
ArrayRef< ParmVarDecl * > parameters() const
Definition: Decl.h:2651
FunctionDecl * getTemplateInstantiationPattern(bool ForDefinition=true) const
Retrieve the function declaration from which this function could be instantiated, if it is an instant...
Definition: Decl.cpp:4078
bool isMSVCRTEntryPoint() const
Determines whether this function is an MSVCRT user-defined entry point.
Definition: Decl.cpp:3279
bool isInlineBuiltinDeclaration() const
Determine if this function provides an inline implementation of a builtin.
Definition: Decl.cpp:3414
bool hasImplicitReturnZero() const
Whether falling off this function implicitly returns null/zero.
Definition: Decl.h:2365
bool isMain() const
Determines whether this function is "main", which is the entry point into an executable program.
Definition: Decl.cpp:3271
bool isDefaulted() const
Whether this function is defaulted.
Definition: Decl.h:2322
OverloadedOperatorKind getOverloadedOperator() const
getOverloadedOperator - Which C++ overloaded operator this function represents, if any.
Definition: Decl.cpp:3944
Represents a prototype with parameter type info, e.g.
Definition: Type.h:4199
QualType desugar() const
Definition: Type.h:4666
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:105
const Decl * getDecl() const
Definition: GlobalDecl.h:103
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition: Decl.cpp:5338
Represents the declaration of a label.
Definition: Decl.h:499
FPExceptionModeKind
Possible floating point exception behavior.
Definition: LangOptions.h:264
@ FPE_Strict
Strictly preserve the floating-point exception semantics.
Definition: LangOptions.h:270
@ FPE_MayTrap
Transformations do not cause new exceptions but may hide some.
Definition: LangOptions.h:268
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Definition: LangOptions.h:266
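These modes map onto LLVM's constrained-FP exception behavior; a sketch mirroring what ToConstrainedExceptMD() (listed further below) does, with a conservative default for modes not shown here:

    #include "clang/Basic/LangOptions.h"
    #include "llvm/IR/FPEnv.h"

    llvm::fp::ExceptionBehavior
    toExceptBehavior(clang::LangOptions::FPExceptionModeKind Kind) {
      switch (Kind) {
      case clang::LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
      case clang::LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
      case clang::LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
      default:                              return llvm::fp::ebStrict;
      }
    }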
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:418
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:424
RoundingMode getDefaultRoundingMode() const
Definition: LangOptions.h:705
virtual void mangleCanonicalTypeName(QualType T, raw_ostream &, bool NormalizeIntegers=false)=0
Generates a unique string for an externally visible type for use with TBAA or type uniquing.
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition: Decl.h:315
Represents a parameter to a function.
Definition: Decl.h:1749
ParsedAttr - Represents a syntactic attribute.
Definition: ParsedAttr.h:126
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2898
@ Forbid
Profiling is forbidden using the noprofile attribute.
Definition: ProfileList.h:37
@ Skip
Profiling is skipped using the skipprofile attribute.
Definition: ProfileList.h:35
@ Allow
Profiling is allowed.
Definition: ProfileList.h:33
A (possibly-)qualified type.
Definition: Type.h:737
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:6985
field_range fields() const
Definition: Decl.h:4339
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:5092
decl_type * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Definition: Redeclarable.h:204
Encodes a location in the source.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition: Stmt.h:84
StmtClass getStmtClass() const
Definition: Stmt.h:1356
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition: Stmt.cpp:326
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1299
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition: Stmt.h:1300
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1301
@ LH_Likely
Branch has the [[likely]] attribute.
Definition: Stmt.h:1303
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
Definition: TargetCXXABI.h:136
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1220
bool supportsIFunc() const
Identify whether this target supports IFuncs.
Definition: TargetInfo.h:1452
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1291
virtual std::optional< std::pair< unsigned, unsigned > > getVScaleRange(const LangOptions &LangOpts) const
Returns the target-specific minimum and maximum values for vscale_range.
Definition: TargetInfo.h:981
The base class of the type hierarchy.
Definition: Type.h:1606
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1819
bool isVoidType() const
Definition: Type.h:7443
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition: Type.cpp:2083
bool isPointerType() const
Definition: Type.h:7154
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:651
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2438
TypeClass getTypeClass() const
Definition: Type.h:2074
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:7657
bool isObjCRetainableType() const
Definition: Type.cpp:4758
std::optional< NullabilityKind > getNullability() const
Determine the nullability of the given type.
Definition: Type.cpp:4516
bool isFunctionNoProtoType() const
Definition: Type.h:2262
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2182
QualType getType() const
Definition: Decl.h:717
Represents a variable declaration or definition.
Definition: Decl.h:918
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3290
Expr * getSizeExpr() const
Definition: Type.h:3309
QualType getElementType() const
Definition: Type.h:3526
Defines the clang::TargetInfo interface.
#define UINT_MAX
Definition: limits.h:60
bool evaluateRequiredTargetFeatures(llvm::StringRef RequiredFatures, const llvm::StringMap< bool > &TargetFetureMap)
Returns true if the required target features of a builtin function are enabled.
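A sketch combining evaluateRequiredTargetFeatures() with getRequiredFeatures() and getFunctionFeatureMap() from earlier in this list (helper name hypothetical): decide whether a caller's target features satisfy a builtin's requirements.

    bool callerSatisfiesBuiltin(const clang::ASTContext &Ctx,
                                const clang::FunctionDecl *Caller,
                                unsigned BuiltinID) {
      llvm::StringMap<bool> CallerFeatures;
      Ctx.getFunctionFeatureMap(CallerFeatures, Caller);
      // Feature strings look like "sse4.2" or "avx|avx2" (any-of).
      llvm::StringRef Required =
          Ctx.BuiltinInfo.getRequiredFeatures(BuiltinID);
      return clang::Builtin::evaluateRequiredTargetFeatures(Required,
                                                            CallerFeatures);
    }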
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
constexpr XRayInstrMask Typed
Definition: XRayInstr.h:42
constexpr XRayInstrMask FunctionExit
Definition: XRayInstr.h:40
constexpr XRayInstrMask FunctionEntry
Definition: XRayInstr.h:39
constexpr XRayInstrMask Custom
Definition: XRayInstr.h:41
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1809
The JSON file list parser is used to communicate input to InstallAPI.
@ OpenCL
Definition: LangStandard.h:64
@ CPlusPlus
Definition: LangStandard.h:54
@ NonNull
Values of this type can never be null.
BinaryOperatorKind
@ OMF_initialize
bool isLambdaCallOperator(const CXXMethodDecl *MD)
Definition: ASTLambda.h:27
@ Result
The result type of a method or function.
llvm::fp::ExceptionBehavior ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind)
@ Other
Other implicit parameter.
@ EST_None
no exception specification
unsigned long uint64_t
YAML serialization mapping.
Definition: Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
This structure provides a set of types that are commonly used during IR emission.
llvm::PointerType * ConstGlobalsPtrTy
void* in the address space for constant globals
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
Contains information gathered from parsing the contents of TargetAttr.
Definition: TargetInfo.h:56
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:168
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:159
SanitizerMask Mask
Bitmask of enabled sanitizers.
Definition: Sanitizers.h:182
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition: Sanitizers.h:165
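A minimal sketch of the SanitizerSet API above (clang/Basic/Sanitizers.h); the helper is hypothetical:

    #include "clang/Basic/Sanitizers.h"

    // Returns true if either ASan flavour is enabled, after locally
    // masking out the null check (set() mutates only this by-value copy).
    bool wantsAddressChecks(clang::SanitizerSet SanOpts) {
      SanOpts.set(clang::SanitizerKind::Null, false);
      return SanOpts.hasOneOf(clang::SanitizerKind::Address |
                              clang::SanitizerKind::HWAddress);
    }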
XRayInstrMask Mask
Definition: XRayInstr.h:65
bool has(XRayInstrMask K) const
Definition: XRayInstr.h:48
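Finally, a sketch of the XRayInstrSet bitmask above (clang/Basic/XRayInstr.h); the helper is hypothetical:

    #include "clang/Basic/XRayInstr.h"

    bool instrumentsEntryAndExit(clang::XRayInstrSet Bundle) {
      // has() checks a single kind against the stored Mask.
      return Bundle.has(clang::XRayInstrKind::FunctionEntry) &&
             Bundle.has(clang::XRayInstrKind::FunctionExit);
    }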