clang 19.0.0git
CodeGenFunction.cpp
Go to the documentation of this file.
1//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This coordinates the per-function state used while generating code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CodeGenFunction.h"
14#include "CGBlocks.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CodeGenModule.h"
22#include "CodeGenPGO.h"
23#include "TargetInfo.h"
25#include "clang/AST/ASTLambda.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/Decl.h"
28#include "clang/AST/DeclCXX.h"
29#include "clang/AST/Expr.h"
30#include "clang/AST/StmtCXX.h"
31#include "clang/AST/StmtObjC.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/Dominators.h"
42#include "llvm/IR/FPEnv.h"
43#include "llvm/IR/IntrinsicInst.h"
44#include "llvm/IR/Intrinsics.h"
45#include "llvm/IR/MDBuilder.h"
46#include "llvm/IR/Operator.h"
47#include "llvm/Support/CRC.h"
48#include "llvm/Support/xxhash.h"
49#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
50#include "llvm/Transforms/Utils/PromoteMemToReg.h"
51#include <optional>
52
53using namespace clang;
54using namespace CodeGen;
55
namespace llvm {
// Command-line flag declared here and defined elsewhere in LLVM; presumably
// switches PGO counters to single-byte coverage booleans — TODO confirm the
// defining TU before relying on this comment.
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm
59
60/// shouldEmitLifetimeMarkers - Decide whether we need emit the life-time
61/// markers.
62static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
63 const LangOptions &LangOpts) {
64 if (CGOpts.DisableLifetimeMarkers)
65 return false;
66
67 // Sanitizers may use markers.
68 if (CGOpts.SanitizeAddressUseAfterScope ||
69 LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
70 LangOpts.Sanitize.has(SanitizerKind::Memory))
71 return true;
72
73 // For now, only in optimized builds.
74 return CGOpts.OptimizationLevel != 0;
75}
76
77CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
78 : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
79 Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
81 SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
82 DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
83 ShouldEmitLifetimeMarkers(
84 shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
85 if (!suppressNewContext)
86 CGM.getCXXABI().getMangleContext().startNewFunction();
87 EHStack.setCGF(this);
88
89 SetFastMathFlags(CurFPFeatures);
90}
91
92CodeGenFunction::~CodeGenFunction() {
93 assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
94
95 if (getLangOpts().OpenMP && CurFn)
97
98 // If we have an OpenMPIRBuilder we want to finalize functions (incl.
99 // outlining etc) at some point. Doing it once the function codegen is done
100 // seems to be a reasonable spot. We do it here, as opposed to the deletion
101 // time of the CodeGenModule, because we have to ensure the IR has not yet
102 // been "emitted" to the outside, thus, modifications are still sensible.
103 if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
105}
106
107// Map the LangOption for exception behavior into
108// the corresponding enum in the IR.
109llvm::fp::ExceptionBehavior
111
112 switch (Kind) {
113 case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
114 case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
115 case LangOptions::FPE_Strict: return llvm::fp::ebStrict;
116 default:
117 llvm_unreachable("Unsupported FP Exception Behavior");
118 }
119}
120
122 llvm::FastMathFlags FMF;
123 FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
124 FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
125 FMF.setNoInfs(FPFeatures.getNoHonorInfs());
126 FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
127 FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
128 FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
129 FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
130 Builder.setFastMathFlags(FMF);
131}
132
134 const Expr *E)
135 : CGF(CGF) {
136 ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
137}
138
140 FPOptions FPFeatures)
141 : CGF(CGF) {
142 ConstructorHelper(FPFeatures);
143}
144
145void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
146 OldFPFeatures = CGF.CurFPFeatures;
147 CGF.CurFPFeatures = FPFeatures;
148
149 OldExcept = CGF.Builder.getDefaultConstrainedExcept();
150 OldRounding = CGF.Builder.getDefaultConstrainedRounding();
151
152 if (OldFPFeatures == FPFeatures)
153 return;
154
155 FMFGuard.emplace(CGF.Builder);
156
157 llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
158 CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
159 auto NewExceptionBehavior =
161 FPFeatures.getExceptionMode()));
162 CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
163
164 CGF.SetFastMathFlags(FPFeatures);
165
166 assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
167 isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
168 isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
169 (NewExceptionBehavior == llvm::fp::ebIgnore &&
170 NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
171 "FPConstrained should be enabled on entire function");
172
173 auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
174 auto OldValue =
175 CGF.CurFn->getFnAttribute(Name).getValueAsBool();
176 auto NewValue = OldValue & Value;
177 if (OldValue != NewValue)
178 CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
179 };
180 mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
181 mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
182 mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
183 mergeFnAttrValue(
184 "unsafe-fp-math",
185 FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() &&
186 FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() &&
187 FPFeatures.allowFPContractAcrossStatement());
188}
189
191 CGF.CurFPFeatures = OldFPFeatures;
192 CGF.Builder.setDefaultConstrainedExcept(OldExcept);
193 CGF.Builder.setDefaultConstrainedRounding(OldRounding);
194}
195
197 bool ForPointeeType,
198 CodeGenFunction &CGF) {
199 LValueBaseInfo BaseInfo;
200 TBAAAccessInfo TBAAInfo;
201 CharUnits Alignment =
202 CGF.CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, ForPointeeType);
203 Address Addr = Address(V, CGF.ConvertTypeForMem(T), Alignment);
204 return CGF.MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
205}
206
208 return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false, *this);
209}
210
211LValue
213 return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true, *this);
214}
215
217 QualType T) {
218 return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false, *this);
219}
220
222 QualType T) {
223 return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true, *this);
224}
225
228}
229
231 return CGM.getTypes().ConvertType(T);
232}
233
235 type = type.getCanonicalType();
236 while (true) {
237 switch (type->getTypeClass()) {
238#define TYPE(name, parent)
239#define ABSTRACT_TYPE(name, parent)
240#define NON_CANONICAL_TYPE(name, parent) case Type::name:
241#define DEPENDENT_TYPE(name, parent) case Type::name:
242#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
243#include "clang/AST/TypeNodes.inc"
244 llvm_unreachable("non-canonical or dependent type in IR-generation");
245
246 case Type::Auto:
247 case Type::DeducedTemplateSpecialization:
248 llvm_unreachable("undeduced type in IR-generation");
249
250 // Various scalar types.
251 case Type::Builtin:
252 case Type::Pointer:
253 case Type::BlockPointer:
254 case Type::LValueReference:
255 case Type::RValueReference:
256 case Type::MemberPointer:
257 case Type::Vector:
258 case Type::ExtVector:
259 case Type::ConstantMatrix:
260 case Type::FunctionProto:
261 case Type::FunctionNoProto:
262 case Type::Enum:
263 case Type::ObjCObjectPointer:
264 case Type::Pipe:
265 case Type::BitInt:
266 return TEK_Scalar;
267
268 // Complexes.
269 case Type::Complex:
270 return TEK_Complex;
271
272 // Arrays, records, and Objective-C objects.
273 case Type::ConstantArray:
274 case Type::IncompleteArray:
275 case Type::VariableArray:
276 case Type::Record:
277 case Type::ObjCObject:
278 case Type::ObjCInterface:
279 case Type::ArrayParameter:
280 return TEK_Aggregate;
281
282 // We operate on atomic values according to their underlying type.
283 case Type::Atomic:
284 type = cast<AtomicType>(type)->getValueType();
285 continue;
286 }
287 llvm_unreachable("unknown type kind!");
288 }
289}
290
291llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
292 // For cleanliness, we try to avoid emitting the return block for
293 // simple cases.
294 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
295
296 if (CurBB) {
297 assert(!CurBB->getTerminator() && "Unexpected terminated block.");
298
299 // We have a valid insert point, reuse it if it is empty or there are no
300 // explicit jumps to the return block.
301 if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
302 ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
303 delete ReturnBlock.getBlock();
304 ReturnBlock = JumpDest();
305 } else
307 return llvm::DebugLoc();
308 }
309
310 // Otherwise, if the return block is the target of a single direct
311 // branch then we can just put the code in that block instead. This
312 // cleans up functions which started with a unified return block.
313 if (ReturnBlock.getBlock()->hasOneUse()) {
314 llvm::BranchInst *BI =
315 dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
316 if (BI && BI->isUnconditional() &&
317 BI->getSuccessor(0) == ReturnBlock.getBlock()) {
318 // Record/return the DebugLoc of the simple 'return' expression to be used
319 // later by the actual 'ret' instruction.
320 llvm::DebugLoc Loc = BI->getDebugLoc();
321 Builder.SetInsertPoint(BI->getParent());
322 BI->eraseFromParent();
323 delete ReturnBlock.getBlock();
324 ReturnBlock = JumpDest();
325 return Loc;
326 }
327 }
328
329 // FIXME: We are at an unreachable point, there is no reason to emit the block
330 // unless it has uses. However, we still need a place to put the debug
331 // region.end for now.
332
334 return llvm::DebugLoc();
335}
336
337static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
338 if (!BB) return;
339 if (!BB->use_empty()) {
340 CGF.CurFn->insert(CGF.CurFn->end(), BB);
341 return;
342 }
343 delete BB;
344}
345
347 assert(BreakContinueStack.empty() &&
348 "mismatched push/pop in break/continue stack!");
349
350 bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
351 && NumSimpleReturnExprs == NumReturnExprs
352 && ReturnBlock.getBlock()->use_empty();
353 // Usually the return expression is evaluated before the cleanup
354 // code. If the function contains only a simple return statement,
355 // such as a constant, the location before the cleanup code becomes
356 // the last useful breakpoint in the function, because the simple
357 // return expression will be evaluated after the cleanup code. To be
358 // safe, set the debug location for cleanup code to the location of
359 // the return statement. Otherwise the cleanup code should be at the
360 // end of the function's lexical scope.
361 //
362 // If there are multiple branches to the return block, the branch
363 // instructions will get the location of the return statements and
364 // all will be fine.
365 if (CGDebugInfo *DI = getDebugInfo()) {
366 if (OnlySimpleReturnStmts)
367 DI->EmitLocation(Builder, LastStopPoint);
368 else
369 DI->EmitLocation(Builder, EndLoc);
370 }
371
372 // Pop any cleanups that might have been associated with the
373 // parameters. Do this in whatever block we're currently in; it's
374 // important to do this before we enter the return block or return
375 // edges will be *really* confused.
376 bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
377 bool HasOnlyLifetimeMarkers =
379 bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
380
381 std::optional<ApplyDebugLocation> OAL;
382 if (HasCleanups) {
383 // Make sure the line table doesn't jump back into the body for
384 // the ret after it's been at EndLoc.
385 if (CGDebugInfo *DI = getDebugInfo()) {
386 if (OnlySimpleReturnStmts)
387 DI->EmitLocation(Builder, EndLoc);
388 else
389 // We may not have a valid end location. Try to apply it anyway, and
390 // fall back to an artificial location if needed.
392 }
393
395 }
396
397 // Emit function epilog (to return).
398 llvm::DebugLoc Loc = EmitReturnBlock();
399
401 if (CGM.getCodeGenOpts().InstrumentFunctions)
402 CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
403 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
404 CurFn->addFnAttr("instrument-function-exit-inlined",
405 "__cyg_profile_func_exit");
406 }
407
408 // Emit debug descriptor for function end.
409 if (CGDebugInfo *DI = getDebugInfo())
410 DI->EmitFunctionEnd(Builder, CurFn);
411
412 // Reset the debug location to that of the simple 'return' expression, if any
413 // rather than that of the end of the function's scope '}'.
414 ApplyDebugLocation AL(*this, Loc);
415 EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
417
418 assert(EHStack.empty() &&
419 "did not remove all scopes from cleanup stack!");
420
421 // If someone did an indirect goto, emit the indirect goto block at the end of
422 // the function.
423 if (IndirectBranch) {
424 EmitBlock(IndirectBranch->getParent());
425 Builder.ClearInsertionPoint();
426 }
427
428 // If some of our locals escaped, insert a call to llvm.localescape in the
429 // entry block.
430 if (!EscapedLocals.empty()) {
431 // Invert the map from local to index into a simple vector. There should be
432 // no holes.
434 EscapeArgs.resize(EscapedLocals.size());
435 for (auto &Pair : EscapedLocals)
436 EscapeArgs[Pair.second] = Pair.first;
437 llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
438 &CGM.getModule(), llvm::Intrinsic::localescape);
439 CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
440 }
441
442 // Remove the AllocaInsertPt instruction, which is just a convenience for us.
443 llvm::Instruction *Ptr = AllocaInsertPt;
444 AllocaInsertPt = nullptr;
445 Ptr->eraseFromParent();
446
447 // PostAllocaInsertPt, if created, was lazily created when it was required,
448 // remove it now since it was just created for our own convenience.
449 if (PostAllocaInsertPt) {
450 llvm::Instruction *PostPtr = PostAllocaInsertPt;
451 PostAllocaInsertPt = nullptr;
452 PostPtr->eraseFromParent();
453 }
454
455 // If someone took the address of a label but never did an indirect goto, we
456 // made a zero entry PHI node, which is illegal, zap it now.
457 if (IndirectBranch) {
458 llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
459 if (PN->getNumIncomingValues() == 0) {
460 PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
461 PN->eraseFromParent();
462 }
463 }
464
466 EmitIfUsed(*this, TerminateLandingPad);
467 EmitIfUsed(*this, TerminateHandler);
468 EmitIfUsed(*this, UnreachableBlock);
469
470 for (const auto &FuncletAndParent : TerminateFunclets)
471 EmitIfUsed(*this, FuncletAndParent.second);
472
473 if (CGM.getCodeGenOpts().EmitDeclMetadata)
474 EmitDeclMetadata();
475
476 for (const auto &R : DeferredReplacements) {
477 if (llvm::Value *Old = R.first) {
478 Old->replaceAllUsesWith(R.second);
479 cast<llvm::Instruction>(Old)->eraseFromParent();
480 }
481 }
482 DeferredReplacements.clear();
483
484 // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
485 // PHIs if the current function is a coroutine. We don't do it for all
486 // functions as it may result in slight increase in numbers of instructions
487 // if compiled with no optimizations. We do it for coroutine as the lifetime
488 // of CleanupDestSlot alloca make correct coroutine frame building very
489 // difficult.
491 llvm::DominatorTree DT(*CurFn);
492 llvm::PromoteMemToReg(
493 cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
495 }
496
497 // Scan function arguments for vector width.
498 for (llvm::Argument &A : CurFn->args())
499 if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
500 LargestVectorWidth =
501 std::max((uint64_t)LargestVectorWidth,
502 VT->getPrimitiveSizeInBits().getKnownMinValue());
503
504 // Update vector width based on return type.
505 if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
506 LargestVectorWidth =
507 std::max((uint64_t)LargestVectorWidth,
508 VT->getPrimitiveSizeInBits().getKnownMinValue());
509
510 if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
511 LargestVectorWidth = CurFnInfo->getMaxVectorWidth();
512
513 // Add the min-legal-vector-width attribute. This contains the max width from:
514 // 1. min-vector-width attribute used in the source program.
515 // 2. Any builtins used that have a vector width specified.
516 // 3. Values passed in and out of inline assembly.
517 // 4. Width of vector arguments and return types for this function.
518 // 5. Width of vector arguments and return types for functions called by this
519 // function.
520 if (getContext().getTargetInfo().getTriple().isX86())
521 CurFn->addFnAttr("min-legal-vector-width",
522 llvm::utostr(LargestVectorWidth));
523
524 // Add vscale_range attribute if appropriate.
525 std::optional<std::pair<unsigned, unsigned>> VScaleRange =
527 if (VScaleRange) {
528 CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
529 getLLVMContext(), VScaleRange->first, VScaleRange->second));
530 }
531
532 // If we generated an unreachable return block, delete it now.
533 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
534 Builder.ClearInsertionPoint();
535 ReturnBlock.getBlock()->eraseFromParent();
536 }
537 if (ReturnValue.isValid()) {
538 auto *RetAlloca =
539 dyn_cast<llvm::AllocaInst>(ReturnValue.emitRawPointer(*this));
540 if (RetAlloca && RetAlloca->use_empty()) {
541 RetAlloca->eraseFromParent();
543 }
544 }
545}
546
547/// ShouldInstrumentFunction - Return true if the current function should be
548/// instrumented with __cyg_profile_func_* calls
550 if (!CGM.getCodeGenOpts().InstrumentFunctions &&
551 !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
552 !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
553 return false;
554 if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
555 return false;
556 return true;
557}
558
560 if (!CurFuncDecl)
561 return false;
562 return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
563}
564
565/// ShouldXRayInstrument - Return true if the current function should be
566/// instrumented with XRay nop sleds.
568 return CGM.getCodeGenOpts().XRayInstrumentFunctions;
569}
570
571/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
572/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
574 return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
575 (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
578}
579
581 return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
582 (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
585}
586
587llvm::ConstantInt *
589 // Remove any (C++17) exception specifications, to allow calling e.g. a
590 // noexcept function through a non-noexcept pointer.
591 if (!Ty->isFunctionNoProtoType())
593 std::string Mangled;
594 llvm::raw_string_ostream Out(Mangled);
596 return llvm::ConstantInt::get(
597 CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
598}
599
600void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
601 llvm::Function *Fn) {
602 if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
603 return;
604
605 llvm::LLVMContext &Context = getLLVMContext();
606
607 CGM.GenKernelArgMetadata(Fn, FD, this);
608
609 if (!getLangOpts().OpenCL)
610 return;
611
612 if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
613 QualType HintQTy = A->getTypeHint();
614 const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
615 bool IsSignedInteger =
616 HintQTy->isSignedIntegerType() ||
617 (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
618 llvm::Metadata *AttrMDArgs[] = {
619 llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
620 CGM.getTypes().ConvertType(A->getTypeHint()))),
621 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
622 llvm::IntegerType::get(Context, 32),
623 llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
624 Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
625 }
626
627 if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
628 llvm::Metadata *AttrMDArgs[] = {
629 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
630 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
631 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
632 Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
633 }
634
635 if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
636 llvm::Metadata *AttrMDArgs[] = {
637 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
638 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
639 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
640 Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
641 }
642
643 if (const OpenCLIntelReqdSubGroupSizeAttr *A =
644 FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
645 llvm::Metadata *AttrMDArgs[] = {
646 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
647 Fn->setMetadata("intel_reqd_sub_group_size",
648 llvm::MDNode::get(Context, AttrMDArgs));
649 }
650}
651
652/// Determine whether the function F ends with a return stmt.
653static bool endsWithReturn(const Decl* F) {
654 const Stmt *Body = nullptr;
655 if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
656 Body = FD->getBody();
657 else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
658 Body = OMD->getBody();
659
660 if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
661 auto LastStmt = CS->body_rbegin();
662 if (LastStmt != CS->body_rend())
663 return isa<ReturnStmt>(*LastStmt);
664 }
665 return false;
666}
667
669 if (SanOpts.has(SanitizerKind::Thread)) {
670 Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
671 Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
672 }
673}
674
675/// Check if the return value of this function requires sanitization.
676bool CodeGenFunction::requiresReturnValueCheck() const {
677 return requiresReturnValueNullabilityCheck() ||
678 (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
679 CurCodeDecl->getAttr<ReturnsNonNullAttr>());
680}
681
682static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
683 auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
684 if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
685 !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
686 (MD->getNumParams() != 1 && MD->getNumParams() != 2))
687 return false;
688
689 if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
690 return false;
691
692 if (MD->getNumParams() == 2) {
693 auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
694 if (!PT || !PT->isVoidPointerType() ||
695 !PT->getPointeeType().isConstQualified())
696 return false;
697 }
698
699 return true;
700}
701
702bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
703 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
704 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
705}
706
707bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
708 return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
710 llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
711 return isInAllocaArgument(CGM.getCXXABI(), P->getType());
712 });
713}
714
715/// Return the UBSan prologue signature for \p FD if one is available.
716static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
717 const FunctionDecl *FD) {
718 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
719 if (!MD->isStatic())
720 return nullptr;
722}
723
725 llvm::Function *Fn,
726 const CGFunctionInfo &FnInfo,
727 const FunctionArgList &Args,
728 SourceLocation Loc,
729 SourceLocation StartLoc) {
730 assert(!CurFn &&
731 "Do not use a CodeGenFunction object for more than one function");
732
733 const Decl *D = GD.getDecl();
734
735 DidCallStackSave = false;
736 CurCodeDecl = D;
737 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
738 if (FD && FD->usesSEHTry())
739 CurSEHParent = GD;
740 CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
741 FnRetTy = RetTy;
742 CurFn = Fn;
743 CurFnInfo = &FnInfo;
744 assert(CurFn->isDeclaration() && "Function already has body?");
745
746 // If this function is ignored for any of the enabled sanitizers,
747 // disable the sanitizer for the function.
748 do {
749#define SANITIZER(NAME, ID) \
750 if (SanOpts.empty()) \
751 break; \
752 if (SanOpts.has(SanitizerKind::ID)) \
753 if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc)) \
754 SanOpts.set(SanitizerKind::ID, false);
755
756#include "clang/Basic/Sanitizers.def"
757#undef SANITIZER
758 } while (false);
759
760 if (D) {
761 const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
762 SanitizerMask no_sanitize_mask;
763 bool NoSanitizeCoverage = false;
764
765 for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
766 no_sanitize_mask |= Attr->getMask();
767 // SanitizeCoverage is not handled by SanOpts.
768 if (Attr->hasCoverage())
769 NoSanitizeCoverage = true;
770 }
771
772 // Apply the no_sanitize* attributes to SanOpts.
773 SanOpts.Mask &= ~no_sanitize_mask;
774 if (no_sanitize_mask & SanitizerKind::Address)
775 SanOpts.set(SanitizerKind::KernelAddress, false);
776 if (no_sanitize_mask & SanitizerKind::KernelAddress)
777 SanOpts.set(SanitizerKind::Address, false);
778 if (no_sanitize_mask & SanitizerKind::HWAddress)
779 SanOpts.set(SanitizerKind::KernelHWAddress, false);
780 if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
781 SanOpts.set(SanitizerKind::HWAddress, false);
782
783 if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
784 Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);
785
786 if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
787 Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
788
789 // Some passes need the non-negated no_sanitize attribute. Pass them on.
791 if (no_sanitize_mask & SanitizerKind::Thread)
792 Fn->addFnAttr("no_sanitize_thread");
793 }
794 }
795
797 CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
798 } else {
799 // Apply sanitizer attributes to the function.
800 if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
801 Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
802 if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
803 SanitizerKind::KernelHWAddress))
804 Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
805 if (SanOpts.has(SanitizerKind::MemtagStack))
806 Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
807 if (SanOpts.has(SanitizerKind::Thread))
808 Fn->addFnAttr(llvm::Attribute::SanitizeThread);
809 if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
810 Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
811 }
812 if (SanOpts.has(SanitizerKind::SafeStack))
813 Fn->addFnAttr(llvm::Attribute::SafeStack);
814 if (SanOpts.has(SanitizerKind::ShadowCallStack))
815 Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
816
817 // Apply fuzzing attribute to the function.
818 if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
819 Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
820
821 // Ignore TSan memory acesses from within ObjC/ObjC++ dealloc, initialize,
822 // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time.
823 if (SanOpts.has(SanitizerKind::Thread)) {
824 if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
825 const IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
826 if (OMD->getMethodFamily() == OMF_dealloc ||
827 OMD->getMethodFamily() == OMF_initialize ||
828 (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
830 }
831 }
832 }
833
834 // Ignore unrelated casts in STL allocate() since the allocator must cast
835 // from void* to T* before object initialization completes. Don't match on the
836 // namespace because not all allocators are in std::
837 if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
839 SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
840 }
841
842 // Ignore null checks in coroutine functions since the coroutines passes
843 // are not aware of how to move the extra UBSan instructions across the split
844 // coroutine boundaries.
845 if (D && SanOpts.has(SanitizerKind::Null))
846 if (FD && FD->getBody() &&
847 FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
848 SanOpts.Mask &= ~SanitizerKind::Null;
849
850 // Apply xray attributes to the function (as a string, for now)
851 bool AlwaysXRayAttr = false;
852 if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
857 if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
858 Fn->addFnAttr("function-instrument", "xray-always");
859 AlwaysXRayAttr = true;
860 }
861 if (XRayAttr->neverXRayInstrument())
862 Fn->addFnAttr("function-instrument", "xray-never");
863 if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
865 Fn->addFnAttr("xray-log-args",
866 llvm::utostr(LogArgs->getArgumentCount()));
867 }
868 } else {
870 Fn->addFnAttr(
871 "xray-instruction-threshold",
872 llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
873 }
874
876 if (CGM.getCodeGenOpts().XRayIgnoreLoops)
877 Fn->addFnAttr("xray-ignore-loops");
878
881 Fn->addFnAttr("xray-skip-exit");
882
885 Fn->addFnAttr("xray-skip-entry");
886
887 auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
888 if (FuncGroups > 1) {
889 auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
890 CurFn->getName().bytes_end());
891 auto Group = crc32(FuncName) % FuncGroups;
892 if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
893 !AlwaysXRayAttr)
894 Fn->addFnAttr("function-instrument", "xray-never");
895 }
896 }
897
898 if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) {
899 switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
901 Fn->addFnAttr(llvm::Attribute::SkipProfile);
902 break;
904 Fn->addFnAttr(llvm::Attribute::NoProfile);
905 break;
907 break;
908 }
909 }
910
911 unsigned Count, Offset;
912 if (const auto *Attr =
913 D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
914 Count = Attr->getCount();
915 Offset = Attr->getOffset();
916 } else {
917 Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
918 Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
919 }
920 if (Count && Offset <= Count) {
921 Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
922 if (Offset)
923 Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
924 }
925 // Instruct that functions for COFF/CodeView targets should start with a
926 // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
927 // backends as they don't need it -- instructions on these architectures are
928 // always atomically patchable at runtime.
929 if (CGM.getCodeGenOpts().HotPatch &&
930 getContext().getTargetInfo().getTriple().isX86() &&
931 getContext().getTargetInfo().getTriple().getEnvironment() !=
932 llvm::Triple::CODE16)
933 Fn->addFnAttr("patchable-function", "prologue-short-redirect");
934
935 // Add no-jump-tables value.
936 if (CGM.getCodeGenOpts().NoUseJumpTables)
937 Fn->addFnAttr("no-jump-tables", "true");
938
939 // Add no-inline-line-tables value.
940 if (CGM.getCodeGenOpts().NoInlineLineTables)
941 Fn->addFnAttr("no-inline-line-tables");
942
943 // Add profile-sample-accurate value.
944 if (CGM.getCodeGenOpts().ProfileSampleAccurate)
945 Fn->addFnAttr("profile-sample-accurate");
946
947 if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
948 Fn->addFnAttr("use-sample-profile");
949
950 if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
951 Fn->addFnAttr("cfi-canonical-jump-table");
952
953 if (D && D->hasAttr<NoProfileFunctionAttr>())
954 Fn->addFnAttr(llvm::Attribute::NoProfile);
955
956 if (D) {
957 // Function attributes take precedence over command line flags.
958 if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
959 switch (A->getThunkType()) {
960 case FunctionReturnThunksAttr::Kind::Keep:
961 break;
962 case FunctionReturnThunksAttr::Kind::Extern:
963 Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
964 break;
965 }
966 } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
967 Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
968 }
969
970 if (FD && (getLangOpts().OpenCL ||
971 (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) {
972 // Add metadata for a kernel function.
973 EmitKernelMetadata(FD, Fn);
974 }
975
976 // If we are checking function types, emit a function type signature as
977 // prologue data.
978 if (FD && SanOpts.has(SanitizerKind::Function)) {
979 if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
980 llvm::LLVMContext &Ctx = Fn->getContext();
981 llvm::MDBuilder MDB(Ctx);
982 Fn->setMetadata(
983 llvm::LLVMContext::MD_func_sanitize,
984 MDB.createRTTIPointerPrologue(
985 PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
986 }
987 }
988
989 // If we're checking nullability, we need to know whether we can check the
990 // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
991 if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
993 if (Nullability && *Nullability == NullabilityKind::NonNull &&
994 !FnRetTy->isRecordType()) {
995 if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
996 CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
997 RetValNullabilityPrecondition =
998 llvm::ConstantInt::getTrue(getLLVMContext());
999 }
1000 }
1001
1002 // If we're in C++ mode and the function name is "main", it is guaranteed
1003 // to be norecurse by the standard (3.6.1.3 "The function main shall not be
1004 // used within a program").
1005 //
1006 // OpenCL C 2.0 v2.2-11 s6.9.i:
1007 // Recursion is not supported.
1008 //
1009 // SYCL v1.2.1 s3.10:
1010 // kernels cannot include RTTI information, exception classes,
1011 // recursive code, virtual functions or make use of C++ libraries that
1012 // are not compiled for the device.
1013 if (FD && ((getLangOpts().CPlusPlus && FD->isMain()) ||
1014 getLangOpts().OpenCL || getLangOpts().SYCLIsDevice ||
1015 (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
1016 Fn->addFnAttr(llvm::Attribute::NoRecurse);
1017
1018 llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
1019 llvm::fp::ExceptionBehavior FPExceptionBehavior =
1020 ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
1021 Builder.setDefaultConstrainedRounding(RM);
1022 Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
1023 if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
1024 (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
1025 RM != llvm::RoundingMode::NearestTiesToEven))) {
1026 Builder.setIsFPConstrained(true);
1027 Fn->addFnAttr(llvm::Attribute::StrictFP);
1028 }
1029
1030 // If a custom alignment is used, force realigning to this alignment on
1031 // any main function which certainly will need it.
1032 if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
1033 CGM.getCodeGenOpts().StackAlignment))
1034 Fn->addFnAttr("stackrealign");
1035
1036 // "main" doesn't need to zero out call-used registers.
1037 if (FD && FD->isMain())
1038 Fn->removeFnAttr("zero-call-used-regs");
1039
1040 llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
1041
1042 // Create a marker to make it easy to insert allocas into the entryblock
1043 // later. Don't create this with the builder, because we don't want it
1044 // folded.
1045 llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
1046 AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
1047
1049
1050 Builder.SetInsertPoint(EntryBB);
1051
1052 // If we're checking the return value, allocate space for a pointer to a
1053 // precise source location of the checked return statement.
1054 if (requiresReturnValueCheck()) {
1055 ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
1056 Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
1057 ReturnLocation);
1058 }
1059
1060 // Emit subprogram debug descriptor.
1061 if (CGDebugInfo *DI = getDebugInfo()) {
1062 // Reconstruct the type from the argument list so that implicit parameters,
1063 // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
1064 // convention.
1065 DI->emitFunctionStart(GD, Loc, StartLoc,
1066 DI->getFunctionType(FD, RetTy, Args), CurFn,
1068 }
1069
1071 if (CGM.getCodeGenOpts().InstrumentFunctions)
1072 CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
1073 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
1074 CurFn->addFnAttr("instrument-function-entry-inlined",
1075 "__cyg_profile_func_enter");
1076 if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
1077 CurFn->addFnAttr("instrument-function-entry-inlined",
1078 "__cyg_profile_func_enter_bare");
1079 }
1080
1081 // Since emitting the mcount call here impacts optimizations such as function
1082 // inlining, we just add an attribute to insert a mcount call in backend.
1083 // The attribute "counting-function" is set to mcount function name which is
1084 // architecture dependent.
1085 if (CGM.getCodeGenOpts().InstrumentForProfiling) {
1086 // Calls to fentry/mcount should not be generated if function has
1087 // the no_instrument_function attribute.
1088 if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
1089 if (CGM.getCodeGenOpts().CallFEntry)
1090 Fn->addFnAttr("fentry-call", "true");
1091 else {
1092 Fn->addFnAttr("instrument-function-entry-inlined",
1093 getTarget().getMCountName());
1094 }
1095 if (CGM.getCodeGenOpts().MNopMCount) {
1096 if (!CGM.getCodeGenOpts().CallFEntry)
1097 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1098 << "-mnop-mcount" << "-mfentry";
1099 Fn->addFnAttr("mnop-mcount");
1100 }
1101
1102 if (CGM.getCodeGenOpts().RecordMCount) {
1103 if (!CGM.getCodeGenOpts().CallFEntry)
1104 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1105 << "-mrecord-mcount" << "-mfentry";
1106 Fn->addFnAttr("mrecord-mcount");
1107 }
1108 }
1109 }
1110
1111 if (CGM.getCodeGenOpts().PackedStack) {
1112 if (getContext().getTargetInfo().getTriple().getArch() !=
1113 llvm::Triple::systemz)
1114 CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1115 << "-mpacked-stack";
1116 Fn->addFnAttr("packed-stack");
1117 }
1118
1119 if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
1120 !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
1121 Fn->addFnAttr("warn-stack-size",
1122 std::to_string(CGM.getCodeGenOpts().WarnStackSize));
1123
1124 if (RetTy->isVoidType()) {
1125 // Void type; nothing to return.
1127
1128 // Count the implicit return.
1129 if (!endsWithReturn(D))
1130 ++NumReturnExprs;
1132 // Indirect return; emit returned value directly into sret slot.
1133 // This reduces code size, and affects correctness in C++.
1134 auto AI = CurFn->arg_begin();
1136 ++AI;
1138 &*AI, RetTy, CurFnInfo->getReturnInfo().getIndirectAlign(), false,
1139 nullptr, nullptr, KnownNonNull);
1145 }
1148 // Load the sret pointer from the argument struct and return into that.
1149 unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1150 llvm::Function::arg_iterator EI = CurFn->arg_end();
1151 --EI;
1152 llvm::Value *Addr = Builder.CreateStructGEP(
1153 CurFnInfo->getArgStruct(), &*EI, Idx);
1154 llvm::Type *Ty =
1155 cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
1157 Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
1158 ReturnValue = Address(Addr, ConvertType(RetTy),
1160 } else {
1161 ReturnValue = CreateIRTemp(RetTy, "retval");
1162
1163 // Tell the epilog emitter to autorelease the result. We do this
1164 // now so that various specialized functions can suppress it
1165 // during their IR-generation.
1166 if (getLangOpts().ObjCAutoRefCount &&
1168 RetTy->isObjCRetainableType())
1169 AutoreleaseResult = true;
1170 }
1171
1173
1175
1176 // Emit OpenMP specific initialization of the device functions.
1177 if (getLangOpts().OpenMP && CurCodeDecl)
1179
1180 // Handle emitting HLSL entry functions.
1181 if (D && D->hasAttr<HLSLShaderAttr>())
1183
1185
1186 if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
1187 MD && !MD->isStatic()) {
1188 bool IsInLambda =
1189 MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
1192 if (IsInLambda) {
1193 // We're in a lambda; figure out the captures.
1197 // If the lambda captures the object referred to by '*this' - either by
1198 // value or by reference, make sure CXXThisValue points to the correct
1199 // object.
1200
1201 // Get the lvalue for the field (which is a copy of the enclosing object
1202 // or contains the address of the enclosing object).
1205 // If the enclosing object was captured by value, just use its
1206 // address. Sign this pointer.
1207 CXXThisValue = ThisFieldLValue.getPointer(*this);
1208 } else {
1209 // Load the lvalue pointed to by the field, since '*this' was captured
1210 // by reference.
1211 CXXThisValue =
1212 EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1213 }
1214 }
1215 for (auto *FD : MD->getParent()->fields()) {
1216 if (FD->hasCapturedVLAType()) {
1217 auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1219 auto VAT = FD->getCapturedVLAType();
1220 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1221 }
1222 }
1223 } else if (MD->isImplicitObjectMemberFunction()) {
1224 // Not in a lambda; just use 'this' from the method.
1225 // FIXME: Should we generate a new load for each use of 'this'? The
1226 // fast register allocator would be happier...
1227 CXXThisValue = CXXABIThisValue;
1228 }
1229
1230 // Check the 'this' pointer once per function, if it's available.
1231 if (CXXABIThisValue) {
1232 SanitizerSet SkippedChecks;
1233 SkippedChecks.set(SanitizerKind::ObjectSize, true);
1234 QualType ThisTy = MD->getThisType();
1235
1236 // If this is the call operator of a lambda with no captures, it
1237 // may have a static invoker function, which may call this operator with
1238 // a null 'this' pointer.
1240 SkippedChecks.set(SanitizerKind::Null, true);
1241
1243 isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
1244 Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
1245 }
1246 }
1247
1248 // If any of the arguments have a variably modified type, make sure to
1249 // emit the type size, but only if the function is not naked. Naked functions
1250 // have no prolog to run this evaluation.
1251 if (!FD || !FD->hasAttr<NakedAttr>()) {
1252 for (const VarDecl *VD : Args) {
1253 // Dig out the type as written from ParmVarDecls; it's unclear whether
1254 // the standard (C99 6.9.1p10) requires this, but we're following the
1255 // precedent set by gcc.
1256 QualType Ty;
1257 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1258 Ty = PVD->getOriginalType();
1259 else
1260 Ty = VD->getType();
1261
1262 if (Ty->isVariablyModifiedType())
1264 }
1265 }
1266 // Emit a location at the end of the prologue.
1267 if (CGDebugInfo *DI = getDebugInfo())
1268 DI->EmitLocation(Builder, StartLoc);
1269 // TODO: Do we need to handle this in two places like we do with
1270 // target-features/target-cpu?
1271 if (CurFuncDecl)
1272 if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1273 LargestVectorWidth = VecWidth->getVectorWidth();
1274}
1275
/// Emit the statements that make up the body of a function definition.
void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  // NOTE(review): the then-branch statement is not visible in this extract;
  // compound statements presumably take a dedicated emission path (without an
  // extra lexical scope) -- confirm against upstream.
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
  else
    EmitStmt(Body);
}
1284
/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  // Do not skip over the instrumentation when single byte coverage mode is
  // enabled.
  // NOTE(review): the guarding 'if' condition is not visible in this extract;
  // presumably it tests that Clang PGO instrumentation is active and
  // single-byte coverage is off -- confirm against upstream.
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  // NOTE(review): the uses of CurrentCount (incrementing the counter for S
  // and restoring the region count) are not visible in this extract.
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}
1309
1310/// Tries to mark the given function nounwind based on the
1311/// non-existence of any throwing calls within it. We believe this is
1312/// lightweight enough to do at -O0.
1313static void TryMarkNoThrow(llvm::Function *F) {
1314 // LLVM treats 'nounwind' on a function as part of the type, so we
1315 // can't do this on functions that can be overwritten.
1316 if (F->isInterposable()) return;
1317
1318 for (llvm::BasicBlock &BB : *F)
1319 for (llvm::Instruction &I : BB)
1320 if (I.mayThrow())
1321 return;
1322
1323 F->setDoesNotThrow();
1324}
1325
/// Build the argument list for this function definition: the implicit 'this'
/// parameter (for implicit-object member functions), the declared
/// parameters, any implicit pass_object_size companions, and ABI-mandated
/// implicit structor parameters.  Returns the effective return type.
/// NOTE(review): the first line of this signature (QualType
/// CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,) is not visible in
/// this extract.
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isImplicitObjectMemberFunction()) {
    // Some C++ ABIs return 'this' (or the most-derived pointer) from certain
    // structors; adjust the return type before adding the 'this' parameter.
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      // Each pass_object_size parameter is followed by an implicit size_t
      // argument carrying the object size.
      // NOTE(review): the line creating 'Implicit' (presumably via
      // ImplicitParamDecl::Create) is not visible in this extract.
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  // Constructors and destructors may need extra ABI-specific parameters
  // (e.g. VTT pointers) appended after the declared ones.
  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}
1368
/// Drive code generation for a single function definition: build the argument
/// list, emit the prologue, dispatch on the kind of body (structor, CUDA
/// stub, lambda invoker, defaulted assignment, plain body), handle
/// flowing-off-the-end diagnostics, and emit the epilogue.
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  assert(Fn && "generating code for null Function");
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  if (FD->isInlineBuiltinDeclaration()) {
    // When generating code for a builtin with an inline declaration, use a
    // mangled name to hold the actual body, while keeping an external
    // definition in case the function pointer is referenced somewhere.
    std::string FDInlineName = (Fn->getName() + ".inline").str();
    llvm::Module *M = Fn->getParent();
    llvm::Function *Clone = M->getFunction(FDInlineName);
    if (!Clone) {
      Clone = llvm::Function::Create(Fn->getFunctionType(),
                                     llvm::GlobalValue::InternalLinkage,
                                     Fn->getAddressSpace(), FDInlineName, M);
      Clone->addFnAttr(llvm::Attribute::AlwaysInline);
    }
    Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
    Fn = Clone;
  } else {
    // Detect the unusual situation where an inline version is shadowed by a
    // non-inline version. In that case we should pick the external one
    // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
    // to detect that situation before we reach codegen, so do some late
    // replacement.
    for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
         PD = PD->getPreviousDecl()) {
      if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
        std::string FDInlineName = (Fn->getName() + ".inline").str();
        llvm::Module *M = Fn->getParent();
        if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
          Clone->replaceAllUsesWith(Fn);
          Clone->eraseFromParent();
        }
        break;
      }
    }
  }

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>()) {
    // Clear non-distinct debug info that was possibly attached to the function
    // due to an earlier declaration without the nodebug attribute
    Fn->setSubprogram(nullptr);
    // Disable debug info indefinitely for this function
    DebugInfo = nullptr;
  }

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  if (Body) {
    // Coroutines always emit lifetime markers.
    if (isa<CoroutineBodyStmt>(Body))
      ShouldEmitLifetimeMarkers = true;

    // Initialize helper which will detect jumps which can cause invalid
    // lifetime markers.
    if (ShouldEmitLifetimeMarkers)
      Bypasses.Init(Body);
  }

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Save parameters for coroutine function.
  if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
    llvm::append_range(FnArgs, FD->parameters());

  // Ensure that the function adheres to the forward progress guarantee, which
  // is required by certain optimizations.
  // NOTE(review): the guarding 'if' condition for this attribute is not
  // visible in this extract -- confirm against upstream.
    CurFn->addFnAttr(llvm::Attribute::MustProgress);

  // Generate the body of the function.
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (isa<CXXMethodDecl>(FD) &&
             isLambdaCallOperator(cast<CXXMethodDecl>(FD)) &&
             !FnInfo.isDelegateCall() &&
             cast<CXXMethodDecl>(FD)->getParent()->getLambdaStaticInvoker() &&
             hasInAllocaArg(cast<CXXMethodDecl>(FD))) {
    // If emitting a lambda with static invoker on X86 Windows, change
    // the call operator body.
    // Make sure that this is a call operator with an inalloca arg and check
    // for delegate call to make sure this is the original call op and not the
    // new forwarding function for the static invoker.
    EmitLambdaInAllocaCallOpBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    // NOTE(review): the statement emitting the implicit assignment-operator
    // body is not visible in this extract.
  } else if (Body) {
    EmitFunctionBody(Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior in
  //   a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  // NOTE(review): the opening of this 'if' condition and part of the
  // ShouldEmitUnreachable initializer are not visible in this extract.
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                SanitizerHandler::MissingReturn,
                EmitCheckSourceLocation(FD->getLocation()), std::nullopt);
    } else if (ShouldEmitUnreachable) {
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  // NOTE(review): the call made under this condition (presumably
  // TryMarkNoThrow(CurFn)) is not visible in this extract.
  if (!CurFn->doesNotThrow())
}
1543
1544/// ContainsLabel - Return true if the statement contains a label in it. If
1545/// this statement is not executed normally, it not containing a label means
1546/// that we can just remove the code.
1547bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1548 // Null statement, not a label!
1549 if (!S) return false;
1550
1551 // If this is a label, we have to emit the code, consider something like:
1552 // if (0) { ... foo: bar(); } goto foo;
1553 //
1554 // TODO: If anyone cared, we could track __label__'s, since we know that you
1555 // can't jump to one from outside their declared region.
1556 if (isa<LabelStmt>(S))
1557 return true;
1558
1559 // If this is a case/default statement, and we haven't seen a switch, we have
1560 // to emit the code.
1561 if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1562 return true;
1563
1564 // If this is a switch statement, we want to ignore cases below it.
1565 if (isa<SwitchStmt>(S))
1566 IgnoreCaseStmts = true;
1567
1568 // Scan subexpressions for verboten labels.
1569 for (const Stmt *SubStmt : S->children())
1570 if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1571 return true;
1572
1573 return false;
1574}
1575
1576/// containsBreak - Return true if the statement contains a break out of it.
1577/// If the statement (recursively) contains a switch or loop with a break
1578/// inside of it, this is fine.
1579bool CodeGenFunction::containsBreak(const Stmt *S) {
1580 // Null statement, not a label!
1581 if (!S) return false;
1582
1583 // If this is a switch or loop that defines its own break scope, then we can
1584 // include it and anything inside of it.
1585 if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1586 isa<ForStmt>(S))
1587 return false;
1588
1589 if (isa<BreakStmt>(S))
1590 return true;
1591
1592 // Scan subexpressions for verboten breaks.
1593 for (const Stmt *SubStmt : S->children())
1594 if (containsBreak(SubStmt))
1595 return true;
1596
1597 return false;
1598}
1599
/// Returns true if the statement might introduce a declaration into the
/// current scope.
/// NOTE(review): the signature line (bool
/// CodeGenFunction::mightAddDeclToScope(const Stmt *S) {) is not visible in
/// this extract.
  if (!S) return false;

  // Some statement kinds add a scope and thus never add a decl to the current
  // scope. Note, this list is longer than the list of statements that might
  // have an unscoped decl nested within them, but this way is conservatively
  // correct even if more statement kinds are added.
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
    return false;

  // A declaration statement adds directly to the enclosing scope.
  if (isa<DeclStmt>(S))
    return true;

  // Otherwise recurse into children that share this statement's scope.
  for (const Stmt *SubStmt : S->children())
    if (mightAddDeclToScope(SubStmt))
      return true;

  return false;
}
1622
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the boolean result in Result.
/// NOTE(review): the first line of this signature (taking the Expr *Cond) is
/// not visible in this extract.
                                                   bool &ResultBool,
                                                   bool AllowLabels) {
  // If MC/DC is enabled, disable folding so that we can instrument all
  // conditions to yield complete test vectors. We still keep track of
  // folded conditions during region mapping and visualization.
  if (!AllowLabels && CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().MCDCCoverage)
    return false;

  // Delegate to the APSInt overload, then narrow the folded value to bool.
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}
1643
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value.
/// NOTE(review): the first line of this signature (taking the Expr *Cond) is
/// not visible in this extract.
                                                   llvm::APSInt &ResultInt,
                                                   bool AllowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  // NOTE(review): the declaration of 'Result' (presumably an
  // Expr::EvalResult) is not visible in this extract.
  if (!Cond->EvaluateAsInt(Result, getContext()))
    return false; // Not foldable, not integer or not fully evaluatable.

  // Even a foldable expression must still be emitted if it contains a label,
  // unless the caller explicitly allows labels.
  llvm::APSInt Int = Result.Val.getInt();
  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
    return false; // Contains a label.

  ResultInt = Int;
  return true;
}
1663
1664/// Strip parentheses and simplistic logical-NOT operators.
1665const Expr *CodeGenFunction::stripCond(const Expr *C) {
1666 while (const UnaryOperator *Op = dyn_cast<UnaryOperator>(C->IgnoreParens())) {
1667 if (Op->getOpcode() != UO_LNot)
1668 break;
1669 C = Op->getSubExpr();
1670 }
1671 return C->IgnoreParens();
1672}
1673
/// Determine whether the given condition is an instrumentable condition
/// (i.e. no "&&" or "||").
/// NOTE(review): the signature line (bool
/// CodeGenFunction::isInstrumentedCondition(const Expr *C) {) is not visible
/// in this extract.
  // A condition is instrumentable unless, once parens and '!' are stripped,
  // it is itself a logical AND/OR operator.
  const BinaryOperator *BOp = dyn_cast<BinaryOperator>(stripCond(C));
  return (!BOp || !BOp->isLogicalOp());
}
1680
/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
/// increments a profile counter based on the semantics of the given logical
/// operator opcode.  This is used to instrument branch condition coverage
/// for logical operators.
/// \param Cond the condition being branched on.
/// \param LOp the logical operator (BO_LAnd or BO_LOr) whose semantics
///        determine on which edge the counter is incremented.
/// \param CntrIdx optional expression to use as the counter index; defaults
///        to Cond when null.
/// NOTE(review): the first line of this signature (void
/// CodeGenFunction::EmitBranchToCounterBlock() is not visible in this
/// extract.
    const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
    llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
    Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
  // If not instrumenting, just emit a branch.
  bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
  if (!InstrumentRegions || !isInstrumentedCondition(Cond))
    return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);

  llvm::BasicBlock *ThenBlock = nullptr;
  llvm::BasicBlock *ElseBlock = nullptr;
  llvm::BasicBlock *NextBlock = nullptr;

  // Create the block we'll use to increment the appropriate counter.
  llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");

  // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
  // means we need to evaluate the condition and increment the counter on TRUE:
  //
  // if (Cond)
  //   goto CounterIncrBlock;
  // else
  //   goto FalseBlock;
  //
  // CounterIncrBlock:
  //   Counter++;
  //   goto TrueBlock;

  if (LOp == BO_LAnd) {
    ThenBlock = CounterIncrBlock;
    ElseBlock = FalseBlock;
    NextBlock = TrueBlock;
  }

  // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
  // we need to evaluate the condition and increment the counter on FALSE:
  //
  // if (Cond)
  //   goto TrueBlock;
  // else
  //   goto CounterIncrBlock;
  //
  // CounterIncrBlock:
  //   Counter++;
  //   goto FalseBlock;

  else if (LOp == BO_LOr) {
    ThenBlock = TrueBlock;
    ElseBlock = CounterIncrBlock;
    NextBlock = FalseBlock;
  } else {
    llvm_unreachable("Expected Opcode must be that of a Logical Operator");
  }

  // Emit Branch based on condition.
  EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);

  // Emit the block containing the counter increment(s).
  EmitBlock(CounterIncrBlock);

  // Increment corresponding counter; if index not provided, use Cond as index.
  incrementProfileCounter(CntrIdx ? CntrIdx : Cond);

  // Go to the next block.
  EmitBranch(NextBlock);
}
1751
1752/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1753/// statement) to the specified blocks. Based on the condition, this might try
1754/// to simplify the codegen of the conditional based on the branch.
1755/// \param LH The value of the likelihood attribute on the True branch.
1756/// \param ConditionalOp Used by MC/DC code coverage to track the result of the
1757/// ConditionalOperator (ternary) through a recursive call for the operator's
1758/// LHS and RHS nodes.
1760 const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock,
1761 uint64_t TrueCount, Stmt::Likelihood LH, const Expr *ConditionalOp) {
1762 Cond = Cond->IgnoreParens();
1763
1764 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1765 // Handle X && Y in a condition.
1766 if (CondBOp->getOpcode() == BO_LAnd) {
1767 MCDCLogOpStack.push_back(CondBOp);
1768
1769 // If we have "1 && X", simplify the code. "0 && X" would have constant
1770 // folded if the case was simple enough.
1771 bool ConstantBool = false;
1772 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1773 ConstantBool) {
1774 // br(1 && X) -> br(X).
1775 incrementProfileCounter(CondBOp);
1776 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1777 FalseBlock, TrueCount, LH);
1778 MCDCLogOpStack.pop_back();
1779 return;
1780 }
1781
1782 // If we have "X && 1", simplify the code to use an uncond branch.
1783 // "X && 0" would have been constant folded to 0.
1784 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1785 ConstantBool) {
1786 // br(X && 1) -> br(X).
1787 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1788 FalseBlock, TrueCount, LH, CondBOp);
1789 MCDCLogOpStack.pop_back();
1790 return;
1791 }
1792
1793 // Emit the LHS as a conditional. If the LHS conditional is false, we
1794 // want to jump to the FalseBlock.
1795 llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1796 // The counter tells us how often we evaluate RHS, and all of TrueCount
1797 // can be propagated to that branch.
1798 uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1799
1800 ConditionalEvaluation eval(*this);
1801 {
1802 ApplyDebugLocation DL(*this, Cond);
1803 // Propagate the likelihood attribute like __builtin_expect
1804 // __builtin_expect(X && Y, 1) -> X and Y are likely
1805 // __builtin_expect(X && Y, 0) -> only Y is unlikely
1806 EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1807 LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1808 EmitBlock(LHSTrue);
1809 }
1810
1811 incrementProfileCounter(CondBOp);
1812 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1813
1814 // Any temporaries created here are conditional.
1815 eval.begin(*this);
1816 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1817 FalseBlock, TrueCount, LH);
1818 eval.end(*this);
1819 MCDCLogOpStack.pop_back();
1820 return;
1821 }
1822
1823 if (CondBOp->getOpcode() == BO_LOr) {
1824 MCDCLogOpStack.push_back(CondBOp);
1825
1826 // If we have "0 || X", simplify the code. "1 || X" would have constant
1827 // folded if the case was simple enough.
1828 bool ConstantBool = false;
1829 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1830 !ConstantBool) {
1831 // br(0 || X) -> br(X).
1832 incrementProfileCounter(CondBOp);
1833 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1834 FalseBlock, TrueCount, LH);
1835 MCDCLogOpStack.pop_back();
1836 return;
1837 }
1838
1839 // If we have "X || 0", simplify the code to use an uncond branch.
1840 // "X || 1" would have been constant folded to 1.
1841 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1842 !ConstantBool) {
1843 // br(X || 0) -> br(X).
1844 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1845 FalseBlock, TrueCount, LH, CondBOp);
1846 MCDCLogOpStack.pop_back();
1847 return;
1848 }
1849 // Emit the LHS as a conditional. If the LHS conditional is true, we
1850 // want to jump to the TrueBlock.
1851 llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1852 // We have the count for entry to the RHS and for the whole expression
1853 // being true, so we can divy up True count between the short circuit and
1854 // the RHS.
1855 uint64_t LHSCount =
1856 getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1857 uint64_t RHSCount = TrueCount - LHSCount;
1858
1859 ConditionalEvaluation eval(*this);
1860 {
1861 // Propagate the likelihood attribute like __builtin_expect
1862 // __builtin_expect(X || Y, 1) -> only Y is likely
1863 // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1864 ApplyDebugLocation DL(*this, Cond);
1865 EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1866 LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1867 EmitBlock(LHSFalse);
1868 }
1869
1870 incrementProfileCounter(CondBOp);
1871 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1872
1873 // Any temporaries created here are conditional.
1874 eval.begin(*this);
1875 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
1876 RHSCount, LH);
1877
1878 eval.end(*this);
1879 MCDCLogOpStack.pop_back();
1880 return;
1881 }
1882 }
1883
1884 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1885 // br(!x, t, f) -> br(x, f, t)
1886 // Avoid doing this optimization when instrumenting a condition for MC/DC.
1887 // LNot is taken as part of the condition for simplicity, and changing its
1888 // sense negatively impacts test vector tracking.
1889 bool MCDCCondition = CGM.getCodeGenOpts().hasProfileClangInstr() &&
1890 CGM.getCodeGenOpts().MCDCCoverage &&
1892 if (CondUOp->getOpcode() == UO_LNot && !MCDCCondition) {
1893 // Negate the count.
1894 uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1895 // The values of the enum are chosen to make this negation possible.
1896 LH = static_cast<Stmt::Likelihood>(-LH);
1897 // Negate the condition and swap the destination blocks.
1898 return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1899 FalseCount, LH);
1900 }
1901 }
1902
1903 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1904 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1905 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1906 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1907
1908 // The ConditionalOperator itself has no likelihood information for its
1909 // true and false branches. This matches the behavior of __builtin_expect.
1910 ConditionalEvaluation cond(*this);
1911 EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1913
1914 // When computing PGO branch weights, we only know the overall count for
1915 // the true block. This code is essentially doing tail duplication of the
1916 // naive code-gen, introducing new edges for which counts are not
1917 // available. Divide the counts proportionally between the LHS and RHS of
1918 // the conditional operator.
1919 uint64_t LHSScaledTrueCount = 0;
1920 if (TrueCount) {
1921 double LHSRatio =
1923 LHSScaledTrueCount = TrueCount * LHSRatio;
1924 }
1925
1926 cond.begin(*this);
1927 EmitBlock(LHSBlock);
1929 {
1930 ApplyDebugLocation DL(*this, Cond);
1931 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1932 LHSScaledTrueCount, LH, CondOp);
1933 }
1934 cond.end(*this);
1935
1936 cond.begin(*this);
1937 EmitBlock(RHSBlock);
1938 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1939 TrueCount - LHSScaledTrueCount, LH, CondOp);
1940 cond.end(*this);
1941
1942 return;
1943 }
1944
1945 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1946 // Conditional operator handling can give us a throw expression as a
1947 // condition for a case like:
1948 // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
1949 // Fold this to:
1950 // br(c, throw x, br(y, t, f))
1951 EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1952 return;
1953 }
1954
1955 // Emit the code with the fully general case.
1956 llvm::Value *CondV;
1957 {
1958 ApplyDebugLocation DL(*this, Cond);
1959 CondV = EvaluateExprAsBool(Cond);
1960 }
1961
1962 // If not at the top of the logical operator nest, update MCDC temp with the
1963 // boolean result of the evaluated condition.
1964 if (!MCDCLogOpStack.empty()) {
1965 const Expr *MCDCBaseExpr = Cond;
1966 // When a nested ConditionalOperator (ternary) is encountered in a boolean
1967 // expression, MC/DC tracks the result of the ternary, and this is tied to
1968 // the ConditionalOperator expression and not the ternary's LHS or RHS. If
1969 // this is the case, the ConditionalOperator expression is passed through
1970 // the ConditionalOp parameter and then used as the MCDC base expression.
1971 if (ConditionalOp)
1972 MCDCBaseExpr = ConditionalOp;
1973
1974 maybeUpdateMCDCCondBitmap(MCDCBaseExpr, CondV);
1975 }
1976
1977 llvm::MDNode *Weights = nullptr;
1978 llvm::MDNode *Unpredictable = nullptr;
1979
1980 // If the branch has a condition wrapped by __builtin_unpredictable,
1981 // create metadata that specifies that the branch is unpredictable.
1982 // Don't bother if not optimizing because that metadata would not be used.
1983 auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1984 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1985 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1986 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1987 llvm::MDBuilder MDHelper(getLLVMContext());
1988 Unpredictable = MDHelper.createUnpredictable();
1989 }
1990 }
1991
1992 // If there is a Likelihood knowledge for the cond, lower it.
1993 // Note that if not optimizing this won't emit anything.
1994 llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
1995 if (CondV != NewCondV)
1996 CondV = NewCondV;
1997 else {
1998 // Otherwise, lower profile counts. Note that we do this even at -O0.
1999 uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
2000 Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
2001 }
2002
2003 Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
2004}
2005
2006/// ErrorUnsupported - Print out an error that codegen doesn't support the
2007/// specified stmt yet.
2008void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
2010}
2011
2012/// emitNonZeroVLAInit - Emit the "zero" initialization of a
2013/// variable-length array whose elements have a non-zero bit-pattern.
2014///
2015/// \param baseType the inner-most element type of the array
2016/// \param src - a char* pointing to the bit-pattern for a single
2017/// base element of the array
2018/// \param sizeInChars - the total size of the VLA, in chars
2020 Address dest, Address src,
2021 llvm::Value *sizeInChars) {
2023
2024 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
2025 llvm::Value *baseSizeInChars
2026 = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
2027
2028 Address begin = dest.withElementType(CGF.Int8Ty);
2029 llvm::Value *end = Builder.CreateInBoundsGEP(begin.getElementType(),
2030 begin.emitRawPointer(CGF),
2031 sizeInChars, "vla.end");
2032
2033 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
2034 llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
2035 llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
2036
2037 // Make a loop over the VLA. C99 guarantees that the VLA element
2038 // count must be nonzero.
2039 CGF.EmitBlock(loopBB);
2040
2041 llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
2042 cur->addIncoming(begin.emitRawPointer(CGF), originBB);
2043
2044 CharUnits curAlign =
2045 dest.getAlignment().alignmentOfArrayElement(baseSize);
2046
2047 // memcpy the individual element bit-pattern.
2048 Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
2049 /*volatile*/ false);
2050
2051 // Go to the next element.
2052 llvm::Value *next =
2053 Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
2054
2055 // Leave if that's the end of the VLA.
2056 llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
2057 Builder.CreateCondBr(done, contBB, loopBB);
2058 cur->addIncoming(next, loopBB);
2059
2060 CGF.EmitBlock(contBB);
2061}
2062
2063void
2065 // Ignore empty classes in C++.
2066 if (getLangOpts().CPlusPlus) {
2067 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2068 if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
2069 return;
2070 }
2071 }
2072
2073 if (DestPtr.getElementType() != Int8Ty)
2074 DestPtr = DestPtr.withElementType(Int8Ty);
2075
2076 // Get size and alignment info for this aggregate.
2078
2079 llvm::Value *SizeVal;
2080 const VariableArrayType *vla;
2081
2082 // Don't bother emitting a zero-byte memset.
2083 if (size.isZero()) {
2084 // But note that getTypeInfo returns 0 for a VLA.
2085 if (const VariableArrayType *vlaType =
2086 dyn_cast_or_null<VariableArrayType>(
2087 getContext().getAsArrayType(Ty))) {
2088 auto VlaSize = getVLASize(vlaType);
2089 SizeVal = VlaSize.NumElts;
2090 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2091 if (!eltSize.isOne())
2092 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
2093 vla = vlaType;
2094 } else {
2095 return;
2096 }
2097 } else {
2098 SizeVal = CGM.getSize(size);
2099 vla = nullptr;
2100 }
2101
2102 // If the type contains a pointer to data member we can't memset it to zero.
2103 // Instead, create a null constant and copy it to the destination.
2104 // TODO: there are other patterns besides zero that we can usefully memset,
2105 // like -1, which happens to be the pattern used by member-pointers.
2106 if (!CGM.getTypes().isZeroInitializable(Ty)) {
2107 // For a VLA, emit a single element, then splat that over the VLA.
2108 if (vla) Ty = getContext().getBaseElementType(vla);
2109
2110 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
2111
2112 llvm::GlobalVariable *NullVariable =
2113 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2114 /*isConstant=*/true,
2115 llvm::GlobalVariable::PrivateLinkage,
2116 NullConstant, Twine());
2117 CharUnits NullAlign = DestPtr.getAlignment();
2118 NullVariable->setAlignment(NullAlign.getAsAlign());
2119 Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
2120
2121 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2122
2123 // Get and call the appropriate llvm.memcpy overload.
2124 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2125 return;
2126 }
2127
2128 // Otherwise, just memset the whole thing to zero. This is legal
2129 // because in LLVM, all default initializers (other than the ones we just
2130 // handled above) are guaranteed to have a bit pattern of all zeros.
2131 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2132}
2133
2134llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2135 // Make sure that there is a block for the indirect goto.
2136 if (!IndirectBranch)
2138
2139 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2140
2141 // Make sure the indirect branch includes all of the address-taken blocks.
2142 IndirectBranch->addDestination(BB);
2143 return llvm::BlockAddress::get(CurFn, BB);
2144}
2145
2146llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2147 // If we already made the indirect branch for indirect goto, return its block.
2148 if (IndirectBranch) return IndirectBranch->getParent();
2149
2150 CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
2151
2152 // Create the PHI node that indirect gotos will add entries to.
2153 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2154 "indirect.goto.dest");
2155
2156 // Create the indirect branch instruction.
2157 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2158 return IndirectBranch->getParent();
2159}
2160
2161/// Computes the length of an array in elements, as well as the base
2162/// element type and a properly-typed first element pointer.
2163llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2164 QualType &baseType,
2165 Address &addr) {
2166 const ArrayType *arrayType = origArrayType;
2167
2168 // If it's a VLA, we have to load the stored size. Note that
2169 // this is the size of the VLA in bytes, not its size in elements.
2170 llvm::Value *numVLAElements = nullptr;
2171 if (isa<VariableArrayType>(arrayType)) {
2172 numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
2173
2174 // Walk into all VLAs. This doesn't require changes to addr,
2175 // which has type T* where T is the first non-VLA element type.
2176 do {
2177 QualType elementType = arrayType->getElementType();
2178 arrayType = getContext().getAsArrayType(elementType);
2179
2180 // If we only have VLA components, 'addr' requires no adjustment.
2181 if (!arrayType) {
2182 baseType = elementType;
2183 return numVLAElements;
2184 }
2185 } while (isa<VariableArrayType>(arrayType));
2186
2187 // We get out here only if we find a constant array type
2188 // inside the VLA.
2189 }
2190
2191 // We have some number of constant-length arrays, so addr should
2192 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
2193 // down to the first element of addr.
2195
2196 // GEP down to the array type.
2197 llvm::ConstantInt *zero = Builder.getInt32(0);
2198 gepIndices.push_back(zero);
2199
2200 uint64_t countFromCLAs = 1;
2201 QualType eltType;
2202
2203 llvm::ArrayType *llvmArrayType =
2204 dyn_cast<llvm::ArrayType>(addr.getElementType());
2205 while (llvmArrayType) {
2206 assert(isa<ConstantArrayType>(arrayType));
2207 assert(cast<ConstantArrayType>(arrayType)->getZExtSize() ==
2208 llvmArrayType->getNumElements());
2209
2210 gepIndices.push_back(zero);
2211 countFromCLAs *= llvmArrayType->getNumElements();
2212 eltType = arrayType->getElementType();
2213
2214 llvmArrayType =
2215 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2216 arrayType = getContext().getAsArrayType(arrayType->getElementType());
2217 assert((!llvmArrayType || arrayType) &&
2218 "LLVM and Clang types are out-of-synch");
2219 }
2220
2221 if (arrayType) {
2222 // From this point onwards, the Clang array type has been emitted
2223 // as some other type (probably a packed struct). Compute the array
2224 // size, and just emit the 'begin' expression as a bitcast.
2225 while (arrayType) {
2226 countFromCLAs *= cast<ConstantArrayType>(arrayType)->getZExtSize();
2227 eltType = arrayType->getElementType();
2228 arrayType = getContext().getAsArrayType(eltType);
2229 }
2230
2231 llvm::Type *baseType = ConvertType(eltType);
2232 addr = addr.withElementType(baseType);
2233 } else {
2234 // Create the actual GEP.
2236 addr.emitRawPointer(*this),
2237 gepIndices, "array.begin"),
2238 ConvertTypeForMem(eltType), addr.getAlignment());
2239 }
2240
2241 baseType = eltType;
2242
2243 llvm::Value *numElements
2244 = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2245
2246 // If we had any VLA dimensions, factor them in.
2247 if (numVLAElements)
2248 numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2249
2250 return numElements;
2251}
2252
2253CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2255 assert(vla && "type was not a variable array type!");
2256 return getVLASize(vla);
2257}
2258
2259CodeGenFunction::VlaSizePair
2261 // The number of elements so far; always size_t.
2262 llvm::Value *numElements = nullptr;
2263
2264 QualType elementType;
2265 do {
2266 elementType = type->getElementType();
2267 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2268 assert(vlaSize && "no size for VLA!");
2269 assert(vlaSize->getType() == SizeTy);
2270
2271 if (!numElements) {
2272 numElements = vlaSize;
2273 } else {
2274 // It's undefined behavior if this wraps around, so mark it that way.
2275 // FIXME: Teach -fsanitize=undefined to trap this.
2276 numElements = Builder.CreateNUWMul(numElements, vlaSize);
2277 }
2278 } while ((type = getContext().getAsVariableArrayType(elementType)));
2279
2280 return { numElements, elementType };
2281}
2282
2283CodeGenFunction::VlaSizePair
2286 assert(vla && "type was not a variable array type!");
2287 return getVLAElements1D(vla);
2288}
2289
2290CodeGenFunction::VlaSizePair
2292 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2293 assert(VlaSize && "no size for VLA!");
2294 assert(VlaSize->getType() == SizeTy);
2295 return { VlaSize, Vla->getElementType() };
2296}
2297
2299 assert(type->isVariablyModifiedType() &&
2300 "Must pass variably modified type to EmitVLASizes!");
2301
2303
2304 // We're going to walk down into the type and look for VLA
2305 // expressions.
2306 do {
2307 assert(type->isVariablyModifiedType());
2308
2309 const Type *ty = type.getTypePtr();
2310 switch (ty->getTypeClass()) {
2311
2312#define TYPE(Class, Base)
2313#define ABSTRACT_TYPE(Class, Base)
2314#define NON_CANONICAL_TYPE(Class, Base)
2315#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2316#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2317#include "clang/AST/TypeNodes.inc"
2318 llvm_unreachable("unexpected dependent type!");
2319
2320 // These types are never variably-modified.
2321 case Type::Builtin:
2322 case Type::Complex:
2323 case Type::Vector:
2324 case Type::ExtVector:
2325 case Type::ConstantMatrix:
2326 case Type::Record:
2327 case Type::Enum:
2328 case Type::Using:
2329 case Type::TemplateSpecialization:
2330 case Type::ObjCTypeParam:
2331 case Type::ObjCObject:
2332 case Type::ObjCInterface:
2333 case Type::ObjCObjectPointer:
2334 case Type::BitInt:
2335 llvm_unreachable("type class is never variably-modified!");
2336
2337 case Type::Elaborated:
2338 type = cast<ElaboratedType>(ty)->getNamedType();
2339 break;
2340
2341 case Type::Adjusted:
2342 type = cast<AdjustedType>(ty)->getAdjustedType();
2343 break;
2344
2345 case Type::Decayed:
2346 type = cast<DecayedType>(ty)->getPointeeType();
2347 break;
2348
2349 case Type::Pointer:
2350 type = cast<PointerType>(ty)->getPointeeType();
2351 break;
2352
2353 case Type::BlockPointer:
2354 type = cast<BlockPointerType>(ty)->getPointeeType();
2355 break;
2356
2357 case Type::LValueReference:
2358 case Type::RValueReference:
2359 type = cast<ReferenceType>(ty)->getPointeeType();
2360 break;
2361
2362 case Type::MemberPointer:
2363 type = cast<MemberPointerType>(ty)->getPointeeType();
2364 break;
2365
2366 case Type::ArrayParameter:
2367 case Type::ConstantArray:
2368 case Type::IncompleteArray:
2369 // Losing element qualification here is fine.
2370 type = cast<ArrayType>(ty)->getElementType();
2371 break;
2372
2373 case Type::VariableArray: {
2374 // Losing element qualification here is fine.
2375 const VariableArrayType *vat = cast<VariableArrayType>(ty);
2376
2377 // Unknown size indication requires no size computation.
2378 // Otherwise, evaluate and record it.
2379 if (const Expr *sizeExpr = vat->getSizeExpr()) {
2380 // It's possible that we might have emitted this already,
2381 // e.g. with a typedef and a pointer to it.
2382 llvm::Value *&entry = VLASizeMap[sizeExpr];
2383 if (!entry) {
2384 llvm::Value *size = EmitScalarExpr(sizeExpr);
2385
2386 // C11 6.7.6.2p5:
2387 // If the size is an expression that is not an integer constant
2388 // expression [...] each time it is evaluated it shall have a value
2389 // greater than zero.
2390 if (SanOpts.has(SanitizerKind::VLABound)) {
2391 SanitizerScope SanScope(this);
2392 llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2393 clang::QualType SEType = sizeExpr->getType();
2394 llvm::Value *CheckCondition =
2395 SEType->isSignedIntegerType()
2396 ? Builder.CreateICmpSGT(size, Zero)
2397 : Builder.CreateICmpUGT(size, Zero);
2398 llvm::Constant *StaticArgs[] = {
2399 EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2400 EmitCheckTypeDescriptor(SEType)};
2401 EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound),
2402 SanitizerHandler::VLABoundNotPositive, StaticArgs, size);
2403 }
2404
2405 // Always zexting here would be wrong if it weren't
2406 // undefined behavior to have a negative bound.
2407 // FIXME: What about when size's type is larger than size_t?
2408 entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2409 }
2410 }
2411 type = vat->getElementType();
2412 break;
2413 }
2414
2415 case Type::FunctionProto:
2416 case Type::FunctionNoProto:
2417 type = cast<FunctionType>(ty)->getReturnType();
2418 break;
2419
2420 case Type::Paren:
2421 case Type::TypeOf:
2422 case Type::UnaryTransform:
2423 case Type::Attributed:
2424 case Type::BTFTagAttributed:
2425 case Type::SubstTemplateTypeParm:
2426 case Type::MacroQualified:
2427 case Type::CountAttributed:
2428 // Keep walking after single level desugaring.
2429 type = type.getSingleStepDesugaredType(getContext());
2430 break;
2431
2432 case Type::Typedef:
2433 case Type::Decltype:
2434 case Type::Auto:
2435 case Type::DeducedTemplateSpecialization:
2436 case Type::PackIndexing:
2437 // Stop walking: nothing to do.
2438 return;
2439
2440 case Type::TypeOfExpr:
2441 // Stop walking: emit typeof expression.
2442 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2443 return;
2444
2445 case Type::Atomic:
2446 type = cast<AtomicType>(ty)->getValueType();
2447 break;
2448
2449 case Type::Pipe:
2450 type = cast<PipeType>(ty)->getElementType();
2451 break;
2452 }
2453 } while (type->isVariablyModifiedType());
2454}
2455
2457 if (getContext().getBuiltinVaListType()->isArrayType())
2458 return EmitPointerWithAlignment(E);
2459 return EmitLValue(E).getAddress(*this);
2460}
2461
2463 return EmitLValue(E).getAddress(*this);
2464}
2465
2467 const APValue &Init) {
2468 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2469 if (CGDebugInfo *Dbg = getDebugInfo())
2471 Dbg->EmitGlobalVariable(E->getDecl(), Init);
2472}
2473
2474CodeGenFunction::PeepholeProtection
2476 // At the moment, the only aggressive peephole we do in IR gen
2477 // is trunc(zext) folding, but if we add more, we can easily
2478 // extend this protection.
2479
2480 if (!rvalue.isScalar()) return PeepholeProtection();
2481 llvm::Value *value = rvalue.getScalarVal();
2482 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2483
2484 // Just make an extra bitcast.
2485 assert(HaveInsertPoint());
2486 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2487 Builder.GetInsertBlock());
2488
2489 PeepholeProtection protection;
2490 protection.Inst = inst;
2491 return protection;
2492}
2493
2494void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2495 if (!protection.Inst) return;
2496
2497 // In theory, we could try to duplicate the peepholes now, but whatever.
2498 protection.Inst->eraseFromParent();
2499}
2500
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
                                              QualType Ty, SourceLocation Loc,
                                              SourceLocation AssumptionLoc,
                                              llvm::Value *Alignment,
                                              llvm::Value *OffsetValue) {
  // Normalize Alignment (and the optional OffsetValue) to the pointer-width
  // integer type expected by the alignment-assumption intrinsic.
  if (Alignment->getType() != IntPtrTy)
    Alignment =
        Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
  if (OffsetValue && OffsetValue->getType() != IntPtrTy)
    OffsetValue =
        Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
  // Under -fsanitize=alignment, additionally compute the predicate
  // "((ptr - offset) & (alignment - 1)) == 0" so the assumption can be
  // verified at runtime rather than trusted blindly by the optimizer.
  llvm::Value *TheCheck = nullptr;
  if (SanOpts.has(SanitizerKind::Alignment)) {
    llvm::Value *PtrIntValue =
        Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");

    if (OffsetValue) {
      bool IsOffsetZero = false;
      if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
        IsOffsetZero = CI->isZero();

      // Only subtract the offset when it is not statically known to be zero.
      if (!IsOffsetZero)
        PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
    }

    llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
    llvm::Value *Mask =
        Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
    llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
    TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
  }
  // The assumption is emitted whether or not the sanitizer is active.
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);

  if (!SanOpts.has(SanitizerKind::Alignment))
    return;
  // Sanitizer is enabled: emit the runtime check tied to this assumption.
  emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                               OffsetValue, TheCheck, Assumption);
}
2540
2541void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2542 const Expr *E,
2543 SourceLocation AssumptionLoc,
2544 llvm::Value *Alignment,
2545 llvm::Value *OffsetValue) {
2546 QualType Ty = E->getType();
2547 SourceLocation Loc = E->getExprLoc();
2548
2549 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2550 OffsetValue);
2551}
2552
2553llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2554 llvm::Value *AnnotatedVal,
2555 StringRef AnnotationStr,
2556 SourceLocation Location,
2557 const AnnotateAttr *Attr) {
2559 AnnotatedVal,
2560 CGM.EmitAnnotationString(AnnotationStr),
2561 CGM.EmitAnnotationUnit(Location),
2562 CGM.EmitAnnotationLineNo(Location),
2563 };
2564 if (Attr)
2565 Args.push_back(CGM.EmitAnnotationArgs(Attr));
2566 return Builder.CreateCall(AnnotationFn, Args);
2567}
2568
2569void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2570 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2571 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2572 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2573 {V->getType(), CGM.ConstGlobalsPtrTy}),
2574 V, I->getAnnotation(), D->getLocation(), I);
2575}
2576
2578 Address Addr) {
2579 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2580 llvm::Value *V = Addr.emitRawPointer(*this);
2581 llvm::Type *VTy = V->getType();
2582 auto *PTy = dyn_cast<llvm::PointerType>(VTy);
2583 unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2584 llvm::PointerType *IntrinTy =
2585 llvm::PointerType::get(CGM.getLLVMContext(), AS);
2586 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2587 {IntrinTy, CGM.ConstGlobalsPtrTy});
2588
2589 for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2590 // FIXME Always emit the cast inst so we can differentiate between
2591 // annotation on the first field of a struct and annotation on the struct
2592 // itself.
2593 if (VTy != IntrinTy)
2594 V = Builder.CreateBitCast(V, IntrinTy);
2595 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2596 V = Builder.CreateBitCast(V, VTy);
2597 }
2598
2599 return Address(V, Addr.getElementType(), Addr.getAlignment());
2600}
2601
2603
2605 : CGF(CGF) {
2606 assert(!CGF->IsSanitizerScope);
2607 CGF->IsSanitizerScope = true;
2608}
2609
2611 CGF->IsSanitizerScope = false;
2612}
2613
2614void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2615 const llvm::Twine &Name,
2616 llvm::BasicBlock *BB,
2617 llvm::BasicBlock::iterator InsertPt) const {
2619 if (IsSanitizerScope)
2620 I->setNoSanitizeMetadata();
2621}
2622
2624 llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2625 llvm::BasicBlock::iterator InsertPt) const {
2626 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2627 if (CGF)
2628 CGF->InsertHelper(I, Name, BB, InsertPt);
2629}
2630
2631// Emits an error if we don't have a valid set of target features for the
2632// called function.
2634 const FunctionDecl *TargetDecl) {
2635 // SemaChecking cannot handle below x86 builtins because they have different
2636 // parameter ranges with different TargetAttribute of caller.
2637 if (CGM.getContext().getTargetInfo().getTriple().isX86()) {
2638 unsigned BuiltinID = TargetDecl->getBuiltinID();
2639 if (BuiltinID == X86::BI__builtin_ia32_cmpps ||
2640 BuiltinID == X86::BI__builtin_ia32_cmpss ||
2641 BuiltinID == X86::BI__builtin_ia32_cmppd ||
2642 BuiltinID == X86::BI__builtin_ia32_cmpsd) {
2643 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2644 llvm::StringMap<bool> TargetFetureMap;
2645 CGM.getContext().getFunctionFeatureMap(TargetFetureMap, FD);
2646 llvm::APSInt Result =
2648 if (Result.getSExtValue() > 7 && !TargetFetureMap.lookup("avx"))
2649 CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
2650 << TargetDecl->getDeclName() << "avx";
2651 }
2652 }
2653 return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2654}
2655
2656// Emits an error if we don't have a valid set of target features for the
2657// called function.
// checkTargetFeatures - Diagnose a call from the current function (FD) to
// TargetDecl when the caller does not provide every target feature the
// callee requires: the required-features list for a builtin, or the
// feature map implied by the callee's target("...") attribute.
// NOTE(review): the extraction dropped source line 2658 (the first line of
// the signature, which takes a SourceLocation Loc), line 2684 (the builtin
// feature-evaluation condition — presumably a call to
// Builtin::evaluateRequiredTargetFeatures) and lines 2695-2696 (the
// initialization of ParsedAttr) — verify against the original file.
2659 const FunctionDecl *TargetDecl) {
2660 // Early exit if this is an indirect call.
2661 if (!TargetDecl)
2662 return;
2663
2664 // Get the current enclosing function if it exists. If it doesn't
2665 // we can't check the target features anyhow.
2666 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2667 if (!FD)
2668 return;
2669
2670 // Grab the required features for the call. For a builtin this is listed in
2671 // the td file with the default cpu, for an always_inline function this is any
2672 // listed cpu and any listed features.
2673 unsigned BuiltinID = TargetDecl->getBuiltinID();
2674 std::string MissingFeature;
2675 llvm::StringMap<bool> CallerFeatureMap;
2676 CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2677 // When compiling in HipStdPar mode we have to be conservative in rejecting
2678 // target specific features in the FE, and defer the possible error to the
2679 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
2680 // referenced by an accelerator executable function, we emit an error.
2681 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2682 if (BuiltinID) {
2683 StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
2685 FeatureList, CallerFeatureMap) && !IsHipStdPar) {
2686 CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2687 << TargetDecl->getDeclName()
2688 << FeatureList;
2689 }
2690 } else if (!TargetDecl->isMultiVersion() &&
2691 TargetDecl->hasAttr<TargetAttr>()) {
2692 // Get the required features for the callee.
2693
2694 const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2697
2698 SmallVector<StringRef, 1> ReqFeatures;
2699 llvm::StringMap<bool> CalleeFeatureMap;
2700 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2701
// Features explicitly enabled ("+feat") in the attribute that the callee's
// feature map confirms are required.
2702 for (const auto &F : ParsedAttr.Features) {
2703 if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2704 ReqFeatures.push_back(StringRef(F).substr(1));
2705 }
2706
2707 for (const auto &F : CalleeFeatureMap) {
2708 // Only positive features are "required".
2709 if (F.getValue())
2710 ReqFeatures.push_back(F.getKey())
2711 }
// Record the first feature the caller lacks so it can be named in the
// diagnostic below.
2712 if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
2713 if (!CallerFeatureMap.lookup(Feature)) {
2714 MissingFeature = Feature.str();
2715 return false;
2716 }
2717 return true;
2718 }) && !IsHipStdPar)
2719 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2720 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2721 } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
// The *caller* carries a target attribute: every positive feature in the
// callee's feature map must also be enabled in the caller's map.
2722 llvm::StringMap<bool> CalleeFeatureMap;
2723 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2724
2725 for (const auto &F : CalleeFeatureMap) {
2726 if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) ||
2727 !CallerFeatureMap.find(F.getKey())->getValue()) &&
2728 !IsHipStdPar)
2729 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2730 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2731 }
2732 }
2733}
2734
2735void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2736 if (!CGM.getCodeGenOpts().SanitizeStats)
2737 return;
2738
2739 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2740 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2741 CGM.getSanStats().create(IRB, SSK);
2742}
2743
// EmitKCFIOperandBundle - Attach a "kcfi" operand bundle carrying the KCFI
// type id computed from the callee's (desugared) prototype; the bundle is
// consumed by the kernel control-flow-integrity instrumentation. Nothing is
// added when the callee has no known prototype.
// NOTE(review): the extraction dropped source line 2744 (the start of the
// signature, void CodeGenFunction::EmitKCFIOperandBundle) — verify against
// the original file.
2745 const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2746 const FunctionProtoType *FP =
2747 Callee.getAbstractInfo().getCalleeFunctionProtoType();
2748 if (FP)
2749 Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar()));
2750}
2751
// FormAArch64ResolverCondition - Build the i1 guard for one AArch64
// multiversion resolver option: a runtime cpu-supports check over only the
// option's features that are not already statically enabled for the target.
// Returns nullptr when no runtime check is needed (all features known at
// compile time, or the default option).
// NOTE(review): the extraction dropped source line 2754 — the declaration of
// CondFeatures (a SmallVector<StringRef>, presumably) — verify against the
// original file.
2752llvm::Value *CodeGenFunction::FormAArch64ResolverCondition(
2753 const MultiVersionResolverOption &RO) {
2755 for (const StringRef &Feature : RO.Conditions.Features) {
2756 // Form condition for features which are not yet enabled in target
2757 if (!getContext().getTargetInfo().hasFeature(Feature))
2758 CondFeatures.push_back(Feature);
2759 }
2760 if (!CondFeatures.empty()) {
2761 return EmitAArch64CpuSupports(CondFeatures);
2762 }
2763 return nullptr;
2764}
2765
2766llvm::Value *CodeGenFunction::FormX86ResolverCondition(
2767 const MultiVersionResolverOption &RO) {
2768 llvm::Value *Condition = nullptr;
2769
2770 if (!RO.Conditions.Architecture.empty()) {
2771 StringRef Arch = RO.Conditions.Architecture;
2772 // If arch= specifies an x86-64 micro-architecture level, test the feature
2773 // with __builtin_cpu_supports, otherwise use __builtin_cpu_is.
2774 if (Arch.starts_with("x86-64"))
2775 Condition = EmitX86CpuSupports({Arch});
2776 else
2777 Condition = EmitX86CpuIs(Arch);
2778 }
2779
2780 if (!RO.Conditions.Features.empty()) {
2781 llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2782 Condition =
2783 Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2784 }
2785 return Condition;
2786}
2787
// CreateMultiVersionResolverReturn - Finish a resolver basic block: when the
// target supports IFUNC, return the chosen variant's address; otherwise
// forward the resolver's own arguments to the variant with a musttail call
// and return its result (or void).
// NOTE(review): the extraction dropped source lines 2788/2790 (the first
// signature lines, which per the cross-reference also take CodeGenModule &CGM
// and CGBuilderTy &Builder) and line 2798 (the declaration of Args) — verify
// against the original file.
2789 llvm::Function *Resolver,
2791 llvm::Function *FuncToReturn,
2792 bool SupportsIFunc) {
2793 if (SupportsIFunc) {
// An ifunc resolver returns the selected function pointer directly.
2794 Builder.CreateRet(FuncToReturn);
2795 return;
2796 }
2797
// Forward every incoming argument of the resolver to the selected variant.
2799 llvm::make_pointer_range(Resolver->args()));
2800
2801 llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
// musttail forces genuine tail-call semantics for the forwarding call.
2802 Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2803
2804 if (Resolver->getReturnType()->isVoidTy())
2805 Builder.CreateRetVoid();
2806 else
2807 Builder.CreateRet(Result);
2808}
2809
// EmitMultiVersionResolver - Dispatch to the architecture-specific resolver
// emitter based on the target triple. Only x86/x86-64 and AArch64 are
// implemented (see the assert in the default case).
// NOTE(review): the extraction dropped source line 2810 (the start of the
// signature, void CodeGenFunction::EmitMultiVersionResolver) — verify
// against the original file.
2811 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2812
2813 llvm::Triple::ArchType ArchType =
2814 getContext().getTargetInfo().getTriple().getArch();
2815
2816 switch (ArchType) {
2817 case llvm::Triple::x86:
2818 case llvm::Triple::x86_64:
2819 EmitX86MultiVersionResolver(Resolver, Options);
2820 return;
2821 case llvm::Triple::aarch64:
2822 EmitAArch64MultiVersionResolver(Resolver, Options);
2823 return;
2824
2825 default:
2826 assert(false && "Only implemented for x86 and AArch64 targets");
2827 }
2828}
2829
// EmitAArch64MultiVersionResolver - Emit the body of an AArch64 ifunc
// resolver that selects among the multiversioned variants in Options. Each
// option's guard is tested in order; the last option must be the default
// (no feature conditions), per the asserts below.
// NOTE(review): the extraction dropped source line 2830 (the start of the
// signature) — verify against the original file.
2831 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2832 assert(!Options.empty() && "No multiversion resolver options found");
2833 assert(Options.back().Conditions.Features.size() == 0 &&
2834 "Default case must be last");
2835 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2836 assert(SupportsIFunc &&
2837 "Multiversion resolver requires target IFUNC support");
2838 bool AArch64CpuInitialized = false;
2839 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2840
2841 for (const MultiVersionResolverOption &RO : Options) {
2842 Builder.SetInsertPoint(CurBlock);
2843 llvm::Value *Condition = FormAArch64ResolverCondition(RO);
2844
2845 // The 'default' or 'all features enabled' case.
2846 if (!Condition) {
2847 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2848 SupportsIFunc);
2849 return;
2850 }
2851
// Lazily emit the CPU-feature initialization the first time a runtime test
// is actually needed, inserting it at the top of the entry block so it
// precedes every later feature check.
2852 if (!AArch64CpuInitialized) {
2853 Builder.SetInsertPoint(CurBlock, CurBlock->begin());
2854 EmitAArch64CpuInit();
2855 AArch64CpuInitialized = true;
2856 Builder.SetInsertPoint(CurBlock);
2857 }
2858
// "if (Condition) return this variant; else fall through to next option".
2859 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2860 CGBuilderTy RetBuilder(*this, RetBlock);
2861 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2862 SupportsIFunc);
2863 CurBlock = createBasicBlock("resolver_else", Resolver);
2864 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2865 }
2866
2867 // If no default, emit an unreachable.
2868 Builder.SetInsertPoint(CurBlock);
2869 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2870 TrapCall->setDoesNotReturn();
2871 TrapCall->setDoesNotThrow();
2872 Builder.CreateUnreachable();
2873 Builder.ClearInsertionPoint();
2874}
2875
// EmitX86MultiVersionResolver - Emit the body of an x86 resolver that
// selects among the multiversioned variants in Options: each option's guard
// is tested in order and the first match is returned. The default/generic
// option (null condition) must be last, per the assert below.
// NOTE(review): the extraction dropped source line 2876 (the start of the
// signature) — verify against the original file.
2877 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2878
2879 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2880
2881 // Main function's basic block.
2882 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2883 Builder.SetInsertPoint(CurBlock);
// Unlike the AArch64 resolver, the CPU-detection state is initialized
// unconditionally up front.
2884 EmitX86CpuInit();
2885
2886 for (const MultiVersionResolverOption &RO : Options) {
2887 Builder.SetInsertPoint(CurBlock);
2888 llvm::Value *Condition = FormX86ResolverCondition(RO);
2889
2890 // The 'default' or 'generic' case.
2891 if (!Condition) {
2892 assert(&RO == Options.end() - 1 &&
2893 "Default or Generic case must be last");
2894 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2895 SupportsIFunc);
2896 return;
2897 }
2898
// "if (Condition) return this variant; else fall through to next option".
2899 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2900 CGBuilderTy RetBuilder(*this, RetBlock);
2901 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2902 SupportsIFunc);
2903 CurBlock = createBasicBlock("resolver_else", Resolver);
2904 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2905 }
2906
2907 // If no generic/default, emit an unreachable.
2908 Builder.SetInsertPoint(CurBlock);
2909 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2910 TrapCall->setDoesNotReturn();
2911 TrapCall->setDoesNotThrow();
2912 Builder.CreateUnreachable();
2913 Builder.ClearInsertionPoint();
2914}
2915
2916// Loc - where the diagnostic will point, where in the source code this
2917// alignment has failed.
2918// SecondaryLoc - if present (will be present if sufficiently different from
2919// Loc), the diagnostic will additionally point a "Note:" to this location.
2920// It should be the location where the __attribute__((assume_aligned))
2921// was written e.g.
// emitAlignmentAssumptionCheck - Emit a -fsanitize=alignment check that
// validates the alignment assumption recorded by the llvm.assume call
// `Assumption`, inserting the check *before* the assume so the optimizer
// cannot fold the check away against the assumption itself.
// NOTE(review): the extraction dropped source line 2922 (the start of the
// signature), line 2942 (the early-return condition for the volatile case —
// presumably a test on Ty) and line 2957 (the tail of the StaticData
// initializer) — verify against the original file.
2923 llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2924 SourceLocation SecondaryLoc, llvm::Value *Alignment,
2925 llvm::Value *OffsetValue, llvm::Value *TheCheck,
2926 llvm::Instruction *Assumption) {
2927 assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2928 cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2929 llvm::Intrinsic::getDeclaration(
2930 Builder.GetInsertBlock()->getParent()->getParent(),
2931 llvm::Intrinsic::assume) &&
2932 "Assumption should be a call to llvm.assume().");
2933 assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2934 "Assumption should be the last instruction of the basic block, "
2935 "since the basic block is still being generated.");
2936
2937 if (!SanOpts.has(SanitizerKind::Alignment))
2938 return;
2939
2940 // Don't check pointers to volatile data. The behavior here is implementation-
2941 // defined.
2943 return;
2944
2945 // We need to temporarily remove the assumption so we can insert the
2946 // sanitizer check before it, else the check will be dropped by optimizations.
2947 Assumption->removeFromParent();
2948
2949 {
2950 SanitizerScope SanScope(this);
2951
2952 if (!OffsetValue)
2953 OffsetValue = Builder.getInt1(false); // no offset.
2954
2955 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2956 EmitCheckSourceLocation(SecondaryLoc),
2958 llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2959 EmitCheckValue(Alignment),
2960 EmitCheckValue(OffsetValue)};
2961 EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2962 SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2963 }
2964
2965 // We are now in the (new, empty) "cont" basic block.
2966 // Reintroduce the assumption.
2967 Builder.Insert(Assumption);
2968 // FIXME: Assumption still has its original basic block as its Parent.
2969}
2970
// SourceLocToDebugLoc - Convert a clang SourceLocation into an llvm::DebugLoc
// through the debug-info emitter; yields an empty DebugLoc when debug info
// is disabled.
// NOTE(review): the extraction dropped source line 2971 (the signature,
// llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation
// Location) {) — verify against the original file.
2972 if (CGDebugInfo *DI = getDebugInfo())
2973 return DI->SourceLocToDebugLoc(Location);
2974
2975 return llvm::DebugLoc();
2976}
2977
2978llvm::Value *
2979CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
2980 Stmt::Likelihood LH) {
2981 switch (LH) {
2982 case Stmt::LH_None:
2983 return Cond;
2984 case Stmt::LH_Likely:
2985 case Stmt::LH_Unlikely:
2986 // Don't generate llvm.expect on -O0 as the backend won't use it for
2987 // anything.
2988 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2989 return Cond;
2990 llvm::Type *CondTy = Cond->getType();
2991 assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
2992 llvm::Function *FnExpect =
2993 CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
2994 llvm::Value *ExpectedValueOfCond =
2995 llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
2996 return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
2997 Cond->getName() + ".expval");
2998 }
2999 llvm_unreachable("Unknown Likelihood");
3000}
3001
3002llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
3003 unsigned NumElementsDst,
3004 const llvm::Twine &Name) {
3005 auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
3006 unsigned NumElementsSrc = SrcTy->getNumElements();
3007 if (NumElementsSrc == NumElementsDst)
3008 return SrcVec;
3009
3010 std::vector<int> ShuffleMask(NumElementsDst, -1);
3011 for (unsigned MaskIdx = 0;
3012 MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
3013 ShuffleMask[MaskIdx] = MaskIdx;
3014
3015 return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
3016}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3284
This file provides some common utility functions for processing Lambda related AST Constructs.
StringRef P
Defines enum values for all the target-independent builtin functions.
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, llvm::Function *Resolver, CGBuilderTy &Builder, llvm::Function *FuncToReturn, bool SupportsIFunc)
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, Address dest, Address src, llvm::Value *sizeInChars)
emitNonZeroVLAInit - Emit the "zero" initialization of a variable-length array whose elements have a ...
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB)
static void TryMarkNoThrow(llvm::Function *F)
Tries to mark the given function nounwind based on the non-existence of any throwing calls within it.
static llvm::Constant * getPrologueSignature(CodeGenModule &CGM, const FunctionDecl *FD)
Return the UBSan prologue signature for FD if one is available.
static bool endsWithReturn(const Decl *F)
Determine whether the function F ends with a return stmt.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts, const LangOptions &LangOpts)
shouldEmitLifetimeMarkers - Decide whether we need emit the life-time markers.
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
llvm::MachO::Target Target
Definition: MachO.h:48
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
Definition: Module.cpp:100
Defines the Objective-C statement AST node classes.
Enumerates target-specific builtins in their own namespaces within namespace clang.
__device__ double
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD) const
Parses the target attributes passed in, and returns only the ones that are valid feature names.
CanQualType VoidPtrTy
Definition: ASTContext.h:1118
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:646
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const
Get a function type and produce the equivalent function type with the specified exception specificati...
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2770
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:757
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3514
QualType getElementType() const
Definition: Type.h:3526
Attr - This represents one attribute.
Definition: Attr.h:42
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3840
static bool isLogicalOp(Opcode Opc)
Definition: Expr.h:3972
const char * getRequiredFeatures(unsigned ID) const
Definition: Builtins.h:255
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2535
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2060
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
Definition: DeclCXX.cpp:2462
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition: DeclCXX.h:2186
QualType getThisType() const
Return the type of the this pointer.
Definition: DeclCXX.cpp:2565
bool isStatic() const
Definition: DeclCXX.cpp:2186
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
bool isLambda() const
Determine whether this class describes a lambda function object.
Definition: DeclCXX.h:1022
void getCaptureFields(llvm::DenseMap< const ValueDecl *, FieldDecl * > &Captures, FieldDecl *&ThisCapture) const
For a closure type, retrieve the mapping from captured variables and this to the non-static data memb...
Definition: DeclCXX.cpp:1641
bool isCapturelessLambda() const
Definition: DeclCXX.h:1068
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1202
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2820
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition: Expr.h:3011
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.cpp:1638
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:125
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
std::string SampleProfileFile
Name of the profile file to use with -fprofile-sample-use.
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
XRayInstrSet XRayInstrumentationBundle
Set of XRay instrumentation kinds to emit.
bool hasSanitizeCoverage() const
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
bool hasSanitizeBinaryMetadata() const
unsigned getInAllocaFieldIndex() const
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CharUnits getIndirectAlign() const
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:111
static Address invalid()
Definition: Address.h:153
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:220
CharUnits getAlignment() const
Definition: Address.h:166
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:184
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:241
bool isValid() const
Definition: Address.h:154
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:176
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:824
static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, SourceLocation TemporaryLocation)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:871
This is an IRBuilder insertion helper that forwards to CodeGenFunction::InsertHelper,...
Definition: CGBuilder.h:29
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const override
This forwards to CodeGenFunction::InsertHelper.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:397
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:364
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:128
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:345
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args)=0
Emits a kernel launch stub.
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:131
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
Definition: CGCXXABI.h:123
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF)=0
Emit the ABI-specific prolog for the function.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:158
void buildThisParam(CodeGenFunction &CGF, FunctionArgList &Params)
Build a parameter variable suitable for 'this'.
Definition: CGCXXABI.cpp:128
virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params)=0
Insert any ABI-specific implicit parameters into the parameter list for a function.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:113
All available information about a concrete callee.
Definition: CGCall.h:62
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:55
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
CanQualType getReturnType() const
unsigned getMaxVectorWidth() const
Return the maximum vector width in the arguments.
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
void emitEntryFunction(const FunctionDecl *FD, llvm::Function *Fn)
virtual void functionFinished(CodeGenFunction &CGF)
Cleans up references to the objects in finished function.
llvm::OpenMPIRBuilder & getOMPBuilder()
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D)
Emits OpenMP-specific function prolog.
CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitDestructorBody(FunctionArgList &Args)
void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount=0, Stmt::Likelihood LH=Stmt::LH_None, const Expr *CntrIdx=nullptr)
EmitBranchToCounterBlock - Emit a conditional branch to a new block that increments a profile counter...
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null, If the type contains...
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void unprotectFromPeepholes(PeepholeProtection protection)
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
static bool hasScalarEvaluationKind(QualType T)
LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T)
Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known to be unsigned.
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
void EmitAArch64MultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
const LangOptions & getLangOpts() const
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
void EmitX86MultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
void EmitFunctionBody(const Stmt *Body)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
@ TCK_ConstructorCall
Checking the 'this' pointer for a constructor call.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
void setCurrentProfileCount(uint64_t Count)
Set the profiler's current count.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
static const Expr * stripCond(const Expr *C)
Ignore parentheses and logical-NOT to track conditions consistently.
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we're intending to store to the side, but which will prob...
const TargetInfo & getTarget() const
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
bool ShouldSkipSanitizerInstrumentation()
ShouldSkipSanitizerInstrumentation - Return true if the current function should not be instrumented w...
uint64_t getCurrentProfileCount()
Get the profiler's current count.
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
Address EmitVAListRef(const Expr *E)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
void maybeCreateMCDCCondBitmap()
Allocate a temp value on the stack that MCDC can use to track condition results.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
void EmitConstructorBody(FunctionArgList &Args)
void SetFastMathFlags(FPOptions FPFeatures)
Set the codegen fast-math flags.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD)
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::Type * ConvertType(QualType T)
CodeGenTypes & getTypes() const
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
RawAddress NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters.
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CGFunctionInfo * CurFnInfo
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs=std::nullopt)
EmitStmt - Emit the code for the statement.
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
bool checkIfFunctionMustProgress()
Returns true if a function must make progress, which means the mustprogress attribute can be added.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::BasicBlock * GetIndirectGotoBlock()
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
This class organizes the cross-function state that is used while generating LLVM code.
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::Constant * EmitAnnotationArgs(const AnnotateAttr *Attr)
Emit additional args of the annotation.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
llvm::Constant * EmitAnnotationLineNo(SourceLocation L)
Emit the annotation line number.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
bool imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc, StringRef Category=StringRef()) const
Imbue XRay attributes to a function, applying the always/never attribute lists in the process.
ProfileList::ExclusionType isFunctionBlockedFromProfileInstr(llvm::Function *Fn, SourceLocation Loc) const
ASTContext & getContext() const
llvm::SanitizerStatReport & getSanStats()
llvm::Constant * EmitAnnotationString(StringRef Str)
Emit an annotation string.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
void GenKernelArgMetadata(llvm::Function *FN, const FunctionDecl *FD=nullptr, CodeGenFunction *CGF=nullptr)
OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument information in the program executab...
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
llvm::ConstantInt * CreateKCFITypeId(QualType T)
Generate a KCFI type identifier for T.
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
Definition: CGCall.cpp:1821
llvm::Constant * EmitAnnotationUnit(SourceLocation Loc)
Emit the annotation's translation unit.
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:324
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM z...
llvm::Type * ConvertTypeForMem(QualType T, bool ForBitField=false)
ConvertTypeForMem - Convert type T into a llvm::Type.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
bool containsOnlyLifetimeMarkers(stable_iterator Old) const
Definition: CGCleanup.cpp:115
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:359
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:352
LValue - This represents an lvalue references.
Definition: CGValue.h:181
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:370
llvm::Value * getPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:361
void InsertHelper(llvm::Instruction *I) const
Function called by the CodeGenFunction when an instruction is created.
Definition: CGLoopInfo.cpp:831
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:41
bool isScalar() const
Definition: CGValue.h:63
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:70
llvm::Value * getPointer() const
Definition: Address.h:65
bool isValid() const
Definition: Address.h:61
virtual void checkFunctionABI(CodeGenModule &CGM, const FunctionDecl *Decl) const
Any further codegen related checks that need to be done on a function signature in a target specific ...
Definition: TargetInfo.h:89
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition: TargetInfo.h:217
void Init(const Stmt *Body)
Clear the object and pre-process for the given statement, usually function body statement.
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1606
ConditionalOperator - The ?: ternary operator.
Definition: Expr.h:4179
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1260
ValueDecl * getDecl()
Definition: Expr.h:1328
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
T * getAttr() const
Definition: DeclBase.h:579
ASTContext & getASTContext() const LLVM_READONLY
Definition: DeclBase.cpp:501
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
Definition: DeclBase.cpp:1173
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:565
SourceLocation getLocation() const
Definition: DeclBase.h:445
bool hasAttr() const
Definition: DeclBase.h:583
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1547
bool isIgnored(unsigned DiagID, SourceLocation Loc) const
Determine whether the diagnostic is known to be ignored.
Definition: Diagnostic.h:916
This represents one expression.
Definition: Expr.h:110
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Returns the set of floating point options that apply to this expression.
Definition: Expr.cpp:3846
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3055
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3039
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
std::optional< llvm::APSInt > getIntegerConstantExpr(const ASTContext &Ctx, SourceLocation *Loc=nullptr) const
isIntegerConstantExpr - Return the value if this expression is a valid integer constant expression.
QualType getType() const
Definition: Expr.h:142
ExtVectorType - Extended vector type.
Definition: Type.h:4057
LangOptions::FPExceptionModeKind getExceptionMode() const
Definition: LangOptions.h:861
bool allowFPContractAcrossStatement() const
Definition: LangOptions.h:836
RoundingMode getRoundingMode() const
Definition: LangOptions.h:849
Represents a member of a struct/union/class.
Definition: Decl.h:3058
Represents a function declaration or definition.
Definition: Decl.h:1971
bool isMultiVersion() const
True if this function is considered a multiversioned function.
Definition: Decl.h:2600
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition: Decl.cpp:3236
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3632
bool UsesFPIntrin() const
Determine whether the function was declared in source context that requires constrained FP intrinsics...
Definition: Decl.h:2819
bool usesSEHTry() const
Indicates the function uses __try.
Definition: Decl.h:2481
QualType getReturnType() const
Definition: Decl.h:2755
ArrayRef< ParmVarDecl * > parameters() const
Definition: Decl.h:2684
FunctionDecl * getTemplateInstantiationPattern(bool ForDefinition=true) const
Retrieve the function declaration from which this function could be instantiated, if it is an instant...
Definition: Decl.cpp:4113
bool isMSVCRTEntryPoint() const
Determines whether this function is a MSVCRT user defined entry point.
Definition: Decl.cpp:3314
bool isInlineBuiltinDeclaration() const
Determine if this function provides an inline implementation of a builtin.
Definition: Decl.cpp:3449
bool hasImplicitReturnZero() const
Whether falling off this function implicitly returns null/zero.
Definition: Decl.h:2391
bool isMain() const
Determines whether this function is "main", which is the entry point into an executable program.
Definition: Decl.cpp:3306
bool isDefaulted() const
Whether this function is defaulted.
Definition: Decl.h:2348
OverloadedOperatorKind getOverloadedOperator() const
getOverloadedOperator - Which C++ overloaded operator this function represents, if any.
Definition: Decl.cpp:3979
Represents a prototype with parameter type info, e.g.
Definition: Type.h:4652
QualType desugar() const
Definition: Type.h:5119
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:105
const Decl * getDecl() const
Definition: GlobalDecl.h:103
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition: Decl.cpp:5381
Represents the declaration of a label.
Definition: Decl.h:499
FPExceptionModeKind
Possible floating point exception behavior.
Definition: LangOptions.h:276
@ FPE_Strict
Strictly preserve the floating-point exception semantics.
Definition: LangOptions.h:282
@ FPE_MayTrap
Transformations do not cause new exceptions but may hide some.
Definition: LangOptions.h:280
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Definition: LangOptions.h:278
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:461
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:467
RoundingMode getDefaultRoundingMode() const
Definition: LangOptions.h:748
virtual void mangleCanonicalTypeName(QualType T, raw_ostream &, bool NormalizeIntegers=false)=0
Generates a unique string for an externally visible type for use with TBAA or type uniquing.
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition: Decl.h:315
Represents a parameter to a function.
Definition: Decl.h:1761
ParsedAttr - Represents a syntactic attribute.
Definition: ParsedAttr.h:126
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3135
@ Forbid
Profiling is forbidden using the noprofile attribute.
Definition: ProfileList.h:37
@ Skip
Profiling is skipped using the skipprofile attribute.
Definition: ProfileList.h:35
@ Allow
Profiling is allowed.
Definition: ProfileList.h:33
A (possibly-)qualified type.
Definition: Type.h:940
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:7439
field_range fields() const
Definition: Decl.h:4375
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:5545
decl_type * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Definition: Redeclarable.h:204
Encodes a location in the source.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition: Stmt.h:84
StmtClass getStmtClass() const
Definition: Stmt.h:1358
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition: Stmt.cpp:326
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1301
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition: Stmt.h:1302
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1303
@ LH_Likely
Branch has the [[likely]] attribute.
Definition: Stmt.h:1305
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
Definition: TargetCXXABI.h:136
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1236
bool supportsIFunc() const
Identify whether this target supports IFuncs.
Definition: TargetInfo.h:1468
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1307
virtual std::optional< std::pair< unsigned, unsigned > > getVScaleRange(const LangOptions &LangOpts) const
Returns target-specific min and max values VScale_Range.
Definition: TargetInfo.h:997
The base class of the type hierarchy.
Definition: Type.h:1813
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1870
bool isVoidType() const
Definition: Type.h:7901
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition: Type.cpp:2134
bool isPointerType() const
Definition: Type.h:7608
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:694
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2667
TypeClass getTypeClass() const
Definition: Type.h:2300
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8119
bool isRecordType() const
Definition: Type.h:7702
bool isObjCRetainableType() const
Definition: Type.cpp:4862
std::optional< NullabilityKind > getNullability() const
Determine the nullability of the given type.
Definition: Type.cpp:4610
bool isFunctionNoProtoType() const
Definition: Type.h:2489
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2183
QualType getType() const
Definition: Decl.h:717
Represents a variable declaration or definition.
Definition: Decl.h:918
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3743
Expr * getSizeExpr() const
Definition: Type.h:3762
QualType getElementType() const
Definition: Type.h:3979
Defines the clang::TargetInfo interface.
#define UINT_MAX
Definition: limits.h:60
bool evaluateRequiredTargetFeatures(llvm::StringRef RequiredFatures, const llvm::StringMap< bool > &TargetFetureMap)
Returns true if the required target features of a builtin function are enabled.
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
constexpr XRayInstrMask Typed
Definition: XRayInstr.h:42
constexpr XRayInstrMask FunctionExit
Definition: XRayInstr.h:40
constexpr XRayInstrMask FunctionEntry
Definition: XRayInstr.h:39
constexpr XRayInstrMask Custom
Definition: XRayInstr.h:41
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1873
The JSON file list parser is used to communicate input to InstallAPI.
@ OpenCL
Definition: LangStandard.h:65
@ CPlusPlus
Definition: LangStandard.h:55
@ NonNull
Values of this type can never be null.
BinaryOperatorKind
@ OMF_initialize
bool isLambdaCallOperator(const CXXMethodDecl *MD)
Definition: ASTLambda.h:27
@ Result
The result type of a method or function.
const FunctionProtoType * T
llvm::fp::ExceptionBehavior ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind)
@ Other
Other implicit parameter.
@ EST_None
no exception specification
@ Implicit
An implicit conversion.
unsigned long uint64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
This structure provides a set of types that are commonly used during IR emission.
llvm::PointerType * ConstGlobalsPtrTy
void* in the address space for constant globals
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
Contains information gathered from parsing the contents of TargetAttr.
Definition: TargetInfo.h:57
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:168
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:159
SanitizerMask Mask
Bitmask of enabled sanitizers.
Definition: Sanitizers.h:182
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition: Sanitizers.h:165
XRayInstrMask Mask
Definition: XRayInstr.h:65
bool has(XRayInstrMask K) const
Definition: XRayInstr.h:48