clang 17.0.0git — CodeGenFunction.cpp (source listing captured from the LLVM doxygen pages; see the official documentation for the rendered version of this file).
1//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This coordinates the per-function state used while generating code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CodeGenFunction.h"
14#include "CGBlocks.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CodeGenModule.h"
22#include "CodeGenPGO.h"
23#include "TargetInfo.h"
25#include "clang/AST/ASTLambda.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/Decl.h"
28#include "clang/AST/DeclCXX.h"
29#include "clang/AST/Expr.h"
30#include "clang/AST/StmtCXX.h"
31#include "clang/AST/StmtObjC.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/Dominators.h"
41#include "llvm/IR/FPEnv.h"
42#include "llvm/IR/IntrinsicInst.h"
43#include "llvm/IR/Intrinsics.h"
44#include "llvm/IR/MDBuilder.h"
45#include "llvm/IR/Operator.h"
46#include "llvm/Support/CRC.h"
47#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
48#include "llvm/Transforms/Utils/PromoteMemToReg.h"
49#include <optional>
50
51using namespace clang;
52using namespace CodeGen;
53
54/// shouldEmitLifetimeMarkers - Decide whether we need emit the life-time
55/// markers.
56static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
57 const LangOptions &LangOpts) {
58 if (CGOpts.DisableLifetimeMarkers)
59 return false;
60
61 // Sanitizers may use markers.
62 if (CGOpts.SanitizeAddressUseAfterScope ||
63 LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
64 LangOpts.Sanitize.has(SanitizerKind::Memory))
65 return true;
66
67 // For now, only in optimized builds.
68 return CGOpts.OptimizationLevel != 0;
69}
70
71CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
72 : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
73 Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
75 SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
76 DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
77 ShouldEmitLifetimeMarkers(
78 shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
79 if (!suppressNewContext)
80 CGM.getCXXABI().getMangleContext().startNewFunction();
81 EHStack.setCGF(this);
82
83 SetFastMathFlags(CurFPFeatures);
84}
85
86CodeGenFunction::~CodeGenFunction() {
87 assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
88
89 if (getLangOpts().OpenMP && CurFn)
91
92 // If we have an OpenMPIRBuilder we want to finalize functions (incl.
93 // outlining etc) at some point. Doing it once the function codegen is done
94 // seems to be a reasonable spot. We do it here, as opposed to the deletion
95 // time of the CodeGenModule, because we have to ensure the IR has not yet
96 // been "emitted" to the outside, thus, modifications are still sensible.
97 if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
99}
100
101// Map the LangOption for exception behavior into
102// the corresponding enum in the IR.
103llvm::fp::ExceptionBehavior
105
106 switch (Kind) {
107 case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
108 case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
109 case LangOptions::FPE_Strict: return llvm::fp::ebStrict;
110 default:
111 llvm_unreachable("Unsupported FP Exception Behavior");
112 }
113}
114
116 llvm::FastMathFlags FMF;
117 FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
118 FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
119 FMF.setNoInfs(FPFeatures.getNoHonorInfs());
120 FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
121 FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
122 FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
123 FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
124 Builder.setFastMathFlags(FMF);
125}
126
128 const Expr *E)
129 : CGF(CGF) {
130 ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
131}
132
134 FPOptions FPFeatures)
135 : CGF(CGF) {
136 ConstructorHelper(FPFeatures);
137}
138
139void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
140 OldFPFeatures = CGF.CurFPFeatures;
141 CGF.CurFPFeatures = FPFeatures;
142
143 OldExcept = CGF.Builder.getDefaultConstrainedExcept();
144 OldRounding = CGF.Builder.getDefaultConstrainedRounding();
145
146 if (OldFPFeatures == FPFeatures)
147 return;
148
149 FMFGuard.emplace(CGF.Builder);
150
151 llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
152 CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
153 auto NewExceptionBehavior =
155 FPFeatures.getExceptionMode()));
156 CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
157
158 CGF.SetFastMathFlags(FPFeatures);
159
160 assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
161 isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
162 isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
163 (NewExceptionBehavior == llvm::fp::ebIgnore &&
164 NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
165 "FPConstrained should be enabled on entire function");
166
167 auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
168 auto OldValue =
169 CGF.CurFn->getFnAttribute(Name).getValueAsBool();
170 auto NewValue = OldValue & Value;
171 if (OldValue != NewValue)
172 CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
173 };
174 mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
175 mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
176 mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
177 mergeFnAttrValue(
178 "unsafe-fp-math",
179 FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() &&
180 FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() &&
181 FPFeatures.allowFPContractAcrossStatement());
182}
183
185 CGF.CurFPFeatures = OldFPFeatures;
186 CGF.Builder.setDefaultConstrainedExcept(OldExcept);
187 CGF.Builder.setDefaultConstrainedRounding(OldRounding);
188}
189
191 LValueBaseInfo BaseInfo;
192 TBAAAccessInfo TBAAInfo;
193 CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
194 Address Addr(V, ConvertTypeForMem(T), Alignment);
195 return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
196}
197
198/// Given a value of type T* that may not be to a complete object,
199/// construct an l-value with the natural pointee alignment of T.
200LValue
202 LValueBaseInfo BaseInfo;
203 TBAAAccessInfo TBAAInfo;
204 CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
205 /* forPointeeType= */ true);
206 Address Addr(V, ConvertTypeForMem(T), Align);
207 return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
208}
209
210
212 return CGM.getTypes().ConvertTypeForMem(T);
213}
214
216 return CGM.getTypes().ConvertType(T);
217}
218
220 type = type.getCanonicalType();
221 while (true) {
222 switch (type->getTypeClass()) {
223#define TYPE(name, parent)
224#define ABSTRACT_TYPE(name, parent)
225#define NON_CANONICAL_TYPE(name, parent) case Type::name:
226#define DEPENDENT_TYPE(name, parent) case Type::name:
227#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
228#include "clang/AST/TypeNodes.inc"
229 llvm_unreachable("non-canonical or dependent type in IR-generation");
230
231 case Type::Auto:
232 case Type::DeducedTemplateSpecialization:
233 llvm_unreachable("undeduced type in IR-generation");
234
235 // Various scalar types.
236 case Type::Builtin:
237 case Type::Pointer:
238 case Type::BlockPointer:
239 case Type::LValueReference:
240 case Type::RValueReference:
241 case Type::MemberPointer:
242 case Type::Vector:
243 case Type::ExtVector:
244 case Type::ConstantMatrix:
245 case Type::FunctionProto:
246 case Type::FunctionNoProto:
247 case Type::Enum:
248 case Type::ObjCObjectPointer:
249 case Type::Pipe:
250 case Type::BitInt:
251 return TEK_Scalar;
252
253 // Complexes.
254 case Type::Complex:
255 return TEK_Complex;
256
257 // Arrays, records, and Objective-C objects.
258 case Type::ConstantArray:
259 case Type::IncompleteArray:
260 case Type::VariableArray:
261 case Type::Record:
262 case Type::ObjCObject:
263 case Type::ObjCInterface:
264 return TEK_Aggregate;
265
266 // We operate on atomic values according to their underlying type.
267 case Type::Atomic:
268 type = cast<AtomicType>(type)->getValueType();
269 continue;
270 }
271 llvm_unreachable("unknown type kind!");
272 }
273}
274
275llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
276 // For cleanliness, we try to avoid emitting the return block for
277 // simple cases.
278 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
279
280 if (CurBB) {
281 assert(!CurBB->getTerminator() && "Unexpected terminated block.");
282
283 // We have a valid insert point, reuse it if it is empty or there are no
284 // explicit jumps to the return block.
285 if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
286 ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
287 delete ReturnBlock.getBlock();
288 ReturnBlock = JumpDest();
289 } else
291 return llvm::DebugLoc();
292 }
293
294 // Otherwise, if the return block is the target of a single direct
295 // branch then we can just put the code in that block instead. This
296 // cleans up functions which started with a unified return block.
297 if (ReturnBlock.getBlock()->hasOneUse()) {
298 llvm::BranchInst *BI =
299 dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
300 if (BI && BI->isUnconditional() &&
301 BI->getSuccessor(0) == ReturnBlock.getBlock()) {
302 // Record/return the DebugLoc of the simple 'return' expression to be used
303 // later by the actual 'ret' instruction.
304 llvm::DebugLoc Loc = BI->getDebugLoc();
305 Builder.SetInsertPoint(BI->getParent());
306 BI->eraseFromParent();
307 delete ReturnBlock.getBlock();
308 ReturnBlock = JumpDest();
309 return Loc;
310 }
311 }
312
313 // FIXME: We are at an unreachable point, there is no reason to emit the block
314 // unless it has uses. However, we still need a place to put the debug
315 // region.end for now.
316
318 return llvm::DebugLoc();
319}
320
321static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
322 if (!BB) return;
323 if (!BB->use_empty()) {
324 CGF.CurFn->insert(CGF.CurFn->end(), BB);
325 return;
326 }
327 delete BB;
328}
329
331 assert(BreakContinueStack.empty() &&
332 "mismatched push/pop in break/continue stack!");
333
334 bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
335 && NumSimpleReturnExprs == NumReturnExprs
336 && ReturnBlock.getBlock()->use_empty();
337 // Usually the return expression is evaluated before the cleanup
338 // code. If the function contains only a simple return statement,
339 // such as a constant, the location before the cleanup code becomes
340 // the last useful breakpoint in the function, because the simple
341 // return expression will be evaluated after the cleanup code. To be
342 // safe, set the debug location for cleanup code to the location of
343 // the return statement. Otherwise the cleanup code should be at the
344 // end of the function's lexical scope.
345 //
346 // If there are multiple branches to the return block, the branch
347 // instructions will get the location of the return statements and
348 // all will be fine.
349 if (CGDebugInfo *DI = getDebugInfo()) {
350 if (OnlySimpleReturnStmts)
351 DI->EmitLocation(Builder, LastStopPoint);
352 else
353 DI->EmitLocation(Builder, EndLoc);
354 }
355
356 // Pop any cleanups that might have been associated with the
357 // parameters. Do this in whatever block we're currently in; it's
358 // important to do this before we enter the return block or return
359 // edges will be *really* confused.
360 bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
361 bool HasOnlyLifetimeMarkers =
363 bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
364
365 std::optional<ApplyDebugLocation> OAL;
366 if (HasCleanups) {
367 // Make sure the line table doesn't jump back into the body for
368 // the ret after it's been at EndLoc.
369 if (CGDebugInfo *DI = getDebugInfo()) {
370 if (OnlySimpleReturnStmts)
371 DI->EmitLocation(Builder, EndLoc);
372 else
373 // We may not have a valid end location. Try to apply it anyway, and
374 // fall back to an artificial location if needed.
376 }
377
379 }
380
381 // Emit function epilog (to return).
382 llvm::DebugLoc Loc = EmitReturnBlock();
383
385 if (CGM.getCodeGenOpts().InstrumentFunctions)
386 CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
387 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
388 CurFn->addFnAttr("instrument-function-exit-inlined",
389 "__cyg_profile_func_exit");
390 }
391
392 // Emit debug descriptor for function end.
393 if (CGDebugInfo *DI = getDebugInfo())
394 DI->EmitFunctionEnd(Builder, CurFn);
395
396 // Reset the debug location to that of the simple 'return' expression, if any
397 // rather than that of the end of the function's scope '}'.
398 ApplyDebugLocation AL(*this, Loc);
399 EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
401
402 assert(EHStack.empty() &&
403 "did not remove all scopes from cleanup stack!");
404
405 // If someone did an indirect goto, emit the indirect goto block at the end of
406 // the function.
407 if (IndirectBranch) {
408 EmitBlock(IndirectBranch->getParent());
409 Builder.ClearInsertionPoint();
410 }
411
412 // If some of our locals escaped, insert a call to llvm.localescape in the
413 // entry block.
414 if (!EscapedLocals.empty()) {
415 // Invert the map from local to index into a simple vector. There should be
416 // no holes.
418 EscapeArgs.resize(EscapedLocals.size());
419 for (auto &Pair : EscapedLocals)
420 EscapeArgs[Pair.second] = Pair.first;
421 llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
422 &CGM.getModule(), llvm::Intrinsic::localescape);
423 CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
424 }
425
426 // Remove the AllocaInsertPt instruction, which is just a convenience for us.
427 llvm::Instruction *Ptr = AllocaInsertPt;
428 AllocaInsertPt = nullptr;
429 Ptr->eraseFromParent();
430
431 // PostAllocaInsertPt, if created, was lazily created when it was required,
432 // remove it now since it was just created for our own convenience.
433 if (PostAllocaInsertPt) {
434 llvm::Instruction *PostPtr = PostAllocaInsertPt;
435 PostAllocaInsertPt = nullptr;
436 PostPtr->eraseFromParent();
437 }
438
439 // If someone took the address of a label but never did an indirect goto, we
440 // made a zero entry PHI node, which is illegal, zap it now.
441 if (IndirectBranch) {
442 llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
443 if (PN->getNumIncomingValues() == 0) {
444 PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
445 PN->eraseFromParent();
446 }
447 }
448
450 EmitIfUsed(*this, TerminateLandingPad);
451 EmitIfUsed(*this, TerminateHandler);
452 EmitIfUsed(*this, UnreachableBlock);
453
454 for (const auto &FuncletAndParent : TerminateFunclets)
455 EmitIfUsed(*this, FuncletAndParent.second);
456
457 if (CGM.getCodeGenOpts().EmitDeclMetadata)
458 EmitDeclMetadata();
459
460 for (const auto &R : DeferredReplacements) {
461 if (llvm::Value *Old = R.first) {
462 Old->replaceAllUsesWith(R.second);
463 cast<llvm::Instruction>(Old)->eraseFromParent();
464 }
465 }
466 DeferredReplacements.clear();
467
468 // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
469 // PHIs if the current function is a coroutine. We don't do it for all
470 // functions as it may result in slight increase in numbers of instructions
471 // if compiled with no optimizations. We do it for coroutine as the lifetime
472 // of CleanupDestSlot alloca make correct coroutine frame building very
473 // difficult.
475 llvm::DominatorTree DT(*CurFn);
476 llvm::PromoteMemToReg(
477 cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
479 }
480
481 // Scan function arguments for vector width.
482 for (llvm::Argument &A : CurFn->args())
483 if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
484 LargestVectorWidth =
485 std::max((uint64_t)LargestVectorWidth,
486 VT->getPrimitiveSizeInBits().getKnownMinValue());
487
488 // Update vector width based on return type.
489 if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
490 LargestVectorWidth =
491 std::max((uint64_t)LargestVectorWidth,
492 VT->getPrimitiveSizeInBits().getKnownMinValue());
493
494 if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
495 LargestVectorWidth = CurFnInfo->getMaxVectorWidth();
496
497 // Add the required-vector-width attribute. This contains the max width from:
498 // 1. min-vector-width attribute used in the source program.
499 // 2. Any builtins used that have a vector width specified.
500 // 3. Values passed in and out of inline assembly.
501 // 4. Width of vector arguments and return types for this function.
502 // 5. Width of vector aguments and return types for functions called by this
503 // function.
504 if (getContext().getTargetInfo().getTriple().isX86())
505 CurFn->addFnAttr("min-legal-vector-width",
506 llvm::utostr(LargestVectorWidth));
507
508 // Add vscale_range attribute if appropriate.
509 std::optional<std::pair<unsigned, unsigned>> VScaleRange =
511 if (VScaleRange) {
512 CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
513 getLLVMContext(), VScaleRange->first, VScaleRange->second));
514 }
515
516 // If we generated an unreachable return block, delete it now.
517 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
518 Builder.ClearInsertionPoint();
519 ReturnBlock.getBlock()->eraseFromParent();
520 }
521 if (ReturnValue.isValid()) {
522 auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
523 if (RetAlloca && RetAlloca->use_empty()) {
524 RetAlloca->eraseFromParent();
526 }
527 }
528}
529
530/// ShouldInstrumentFunction - Return true if the current function should be
531/// instrumented with __cyg_profile_func_* calls
533 if (!CGM.getCodeGenOpts().InstrumentFunctions &&
534 !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
535 !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
536 return false;
537 if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
538 return false;
539 return true;
540}
541
543 if (!CurFuncDecl)
544 return false;
545 return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
546}
547
548/// ShouldXRayInstrument - Return true if the current function should be
549/// instrumented with XRay nop sleds.
551 return CGM.getCodeGenOpts().XRayInstrumentFunctions;
552}
553
554/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
555/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
557 return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
558 (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
561}
562
564 return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
565 (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
568}
569
570llvm::Value *
572 llvm::Value *EncodedAddr) {
573 // Reconstruct the address of the global.
574 auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
575 auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
576 auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
577 auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
578
579 // Load the original pointer through the global.
581 "decoded_addr");
582}
583
584void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
585 llvm::Function *Fn) {
586 if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
587 return;
588
589 llvm::LLVMContext &Context = getLLVMContext();
590
591 CGM.GenKernelArgMetadata(Fn, FD, this);
592
593 if (!getLangOpts().OpenCL)
594 return;
595
596 if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
597 QualType HintQTy = A->getTypeHint();
598 const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
599 bool IsSignedInteger =
600 HintQTy->isSignedIntegerType() ||
601 (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
602 llvm::Metadata *AttrMDArgs[] = {
603 llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
604 CGM.getTypes().ConvertType(A->getTypeHint()))),
605 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
606 llvm::IntegerType::get(Context, 32),
607 llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
608 Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
609 }
610
611 if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
612 llvm::Metadata *AttrMDArgs[] = {
613 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
614 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
615 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
616 Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
617 }
618
619 if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
620 llvm::Metadata *AttrMDArgs[] = {
621 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
622 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
623 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
624 Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
625 }
626
627 if (const OpenCLIntelReqdSubGroupSizeAttr *A =
628 FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
629 llvm::Metadata *AttrMDArgs[] = {
630 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
631 Fn->setMetadata("intel_reqd_sub_group_size",
632 llvm::MDNode::get(Context, AttrMDArgs));
633 }
634}
635
636/// Determine whether the function F ends with a return stmt.
637static bool endsWithReturn(const Decl* F) {
638 const Stmt *Body = nullptr;
639 if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
640 Body = FD->getBody();
641 else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
642 Body = OMD->getBody();
643
644 if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
645 auto LastStmt = CS->body_rbegin();
646 if (LastStmt != CS->body_rend())
647 return isa<ReturnStmt>(*LastStmt);
648 }
649 return false;
650}
651
653 if (SanOpts.has(SanitizerKind::Thread)) {
654 Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
655 Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
656 }
657}
658
659/// Check if the return value of this function requires sanitization.
660bool CodeGenFunction::requiresReturnValueCheck() const {
661 return requiresReturnValueNullabilityCheck() ||
662 (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
663 CurCodeDecl->getAttr<ReturnsNonNullAttr>());
664}
665
666static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
667 auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
668 if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
669 !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
670 (MD->getNumParams() != 1 && MD->getNumParams() != 2))
671 return false;
672
673 if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
674 return false;
675
676 if (MD->getNumParams() == 2) {
677 auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
678 if (!PT || !PT->isVoidPointerType() ||
679 !PT->getPointeeType().isConstQualified())
680 return false;
681 }
682
683 return true;
684}
685
686/// Return the UBSan prologue signature for \p FD if one is available.
687static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
688 const FunctionDecl *FD) {
689 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
690 if (!MD->isStatic())
691 return nullptr;
693}
694
696 llvm::Function *Fn,
697 const CGFunctionInfo &FnInfo,
698 const FunctionArgList &Args,
699 SourceLocation Loc,
700 SourceLocation StartLoc) {
701 assert(!CurFn &&
702 "Do not use a CodeGenFunction object for more than one function");
703
704 const Decl *D = GD.getDecl();
705
706 DidCallStackSave = false;
707 CurCodeDecl = D;
708 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
709 if (FD && FD->usesSEHTry())
710 CurSEHParent = GD;
711 CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
712 FnRetTy = RetTy;
713 CurFn = Fn;
714 CurFnInfo = &FnInfo;
715 assert(CurFn->isDeclaration() && "Function already has body?");
716
717 // If this function is ignored for any of the enabled sanitizers,
718 // disable the sanitizer for the function.
719 do {
720#define SANITIZER(NAME, ID) \
721 if (SanOpts.empty()) \
722 break; \
723 if (SanOpts.has(SanitizerKind::ID)) \
724 if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc)) \
725 SanOpts.set(SanitizerKind::ID, false);
726
727#include "clang/Basic/Sanitizers.def"
728#undef SANITIZER
729 } while (false);
730
731 if (D) {
732 const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
733 bool NoSanitizeCoverage = false;
734
735 for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
736 // Apply the no_sanitize* attributes to SanOpts.
737 SanitizerMask mask = Attr->getMask();
738 SanOpts.Mask &= ~mask;
739 if (mask & SanitizerKind::Address)
740 SanOpts.set(SanitizerKind::KernelAddress, false);
741 if (mask & SanitizerKind::KernelAddress)
742 SanOpts.set(SanitizerKind::Address, false);
743 if (mask & SanitizerKind::HWAddress)
744 SanOpts.set(SanitizerKind::KernelHWAddress, false);
745 if (mask & SanitizerKind::KernelHWAddress)
746 SanOpts.set(SanitizerKind::HWAddress, false);
747
748 // SanitizeCoverage is not handled by SanOpts.
749 if (Attr->hasCoverage())
750 NoSanitizeCoverage = true;
751 }
752
753 if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
754 Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);
755
756 if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
757 Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
758 }
759
761 CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
762 } else {
763 // Apply sanitizer attributes to the function.
764 if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
765 Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
766 if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
767 SanitizerKind::KernelHWAddress))
768 Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
769 if (SanOpts.has(SanitizerKind::MemtagStack))
770 Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
771 if (SanOpts.has(SanitizerKind::Thread))
772 Fn->addFnAttr(llvm::Attribute::SanitizeThread);
773 if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
774 Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
775 }
776 if (SanOpts.has(SanitizerKind::SafeStack))
777 Fn->addFnAttr(llvm::Attribute::SafeStack);
778 if (SanOpts.has(SanitizerKind::ShadowCallStack))
779 Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
780
781 // Apply fuzzing attribute to the function.
782 if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
783 Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
784
785 // Ignore TSan memory acesses from within ObjC/ObjC++ dealloc, initialize,
786 // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time.
787 if (SanOpts.has(SanitizerKind::Thread)) {
788 if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
789 IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
790 if (OMD->getMethodFamily() == OMF_dealloc ||
791 OMD->getMethodFamily() == OMF_initialize ||
792 (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
794 }
795 }
796 }
797
798 // Ignore unrelated casts in STL allocate() since the allocator must cast
799 // from void* to T* before object initialization completes. Don't match on the
800 // namespace because not all allocators are in std::
801 if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
803 SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
804 }
805
806 // Ignore null checks in coroutine functions since the coroutines passes
807 // are not aware of how to move the extra UBSan instructions across the split
808 // coroutine boundaries.
809 if (D && SanOpts.has(SanitizerKind::Null))
810 if (FD && FD->getBody() &&
811 FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
812 SanOpts.Mask &= ~SanitizerKind::Null;
813
814 // Apply xray attributes to the function (as a string, for now)
815 bool AlwaysXRayAttr = false;
816 if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
821 if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
822 Fn->addFnAttr("function-instrument", "xray-always");
823 AlwaysXRayAttr = true;
824 }
825 if (XRayAttr->neverXRayInstrument())
826 Fn->addFnAttr("function-instrument", "xray-never");
827 if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
829 Fn->addFnAttr("xray-log-args",
830 llvm::utostr(LogArgs->getArgumentCount()));
831 }
832 } else {
834 Fn->addFnAttr(
835 "xray-instruction-threshold",
836 llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
837 }
838
840 if (CGM.getCodeGenOpts().XRayIgnoreLoops)
841 Fn->addFnAttr("xray-ignore-loops");
842
845 Fn->addFnAttr("xray-skip-exit");
846
849 Fn->addFnAttr("xray-skip-entry");
850
851 auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
852 if (FuncGroups > 1) {
853 auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
854 CurFn->getName().bytes_end());
855 auto Group = crc32(FuncName) % FuncGroups;
856 if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
857 !AlwaysXRayAttr)
858 Fn->addFnAttr("function-instrument", "xray-never");
859 }
860 }
861
862 if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) {
863 switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
865 Fn->addFnAttr(llvm::Attribute::SkipProfile);
866 break;
868 Fn->addFnAttr(llvm::Attribute::NoProfile);
869 break;
871 break;
872 }
873 }
874
875 unsigned Count, Offset;
876 if (const auto *Attr =
877 D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
878 Count = Attr->getCount();
879 Offset = Attr->getOffset();
880 } else {
881 Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
882 Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
883 }
884 if (Count && Offset <= Count) {
885 Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
886 if (Offset)
887 Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
888 }
889 // Instruct that functions for COFF/CodeView targets should start with a
890 // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
891 // backends as they don't need it -- instructions on these architectures are
892 // always atomically patchable at runtime.
893 if (CGM.getCodeGenOpts().HotPatch &&
894 getContext().getTargetInfo().getTriple().isX86() &&
895 getContext().getTargetInfo().getTriple().getEnvironment() !=
896 llvm::Triple::CODE16)
897 Fn->addFnAttr("patchable-function", "prologue-short-redirect");
898
899 // Add no-jump-tables value.
900 if (CGM.getCodeGenOpts().NoUseJumpTables)
901 Fn->addFnAttr("no-jump-tables", "true");
902
903 // Add no-inline-line-tables value.
904 if (CGM.getCodeGenOpts().NoInlineLineTables)
905 Fn->addFnAttr("no-inline-line-tables");
906
907 // Add profile-sample-accurate value.
908 if (CGM.getCodeGenOpts().ProfileSampleAccurate)
909 Fn->addFnAttr("profile-sample-accurate");
910
911 if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
912 Fn->addFnAttr("use-sample-profile");
913
914 if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
915 Fn->addFnAttr("cfi-canonical-jump-table");
916
917 if (D && D->hasAttr<NoProfileFunctionAttr>())
918 Fn->addFnAttr(llvm::Attribute::NoProfile);
919
920 if (D) {
921 // Function attributes take precedence over command line flags.
922 if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
923 switch (A->getThunkType()) {
924 case FunctionReturnThunksAttr::Kind::Keep:
925 break;
926 case FunctionReturnThunksAttr::Kind::Extern:
927 Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
928 break;
929 }
930 } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
931 Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
932 }
933
934 if (FD && (getLangOpts().OpenCL ||
935 (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) {
936 // Add metadata for a kernel function.
937 EmitKernelMetadata(FD, Fn);
938 }
939
940 // If we are checking function types, emit a function type signature as
941 // prologue data.
942 if (FD && getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
943 if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
944 // Remove any (C++17) exception specifications, to allow calling e.g. a
945 // noexcept function through a non-noexcept pointer.
947 FD->getType(), EST_None);
948 llvm::Constant *FTRTTIConst =
949 CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
950 llvm::GlobalVariable *FTRTTIProxy =
952 llvm::LLVMContext &Ctx = Fn->getContext();
953 llvm::MDBuilder MDB(Ctx);
954 Fn->setMetadata(llvm::LLVMContext::MD_func_sanitize,
955 MDB.createRTTIPointerPrologue(PrologueSig, FTRTTIProxy));
956 CGM.addCompilerUsedGlobal(FTRTTIProxy);
957 }
958 }
959
960 // If we're checking nullability, we need to know whether we can check the
961 // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
962 if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
964 if (Nullability && *Nullability == NullabilityKind::NonNull) {
965 if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
966 CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
967 RetValNullabilityPrecondition =
968 llvm::ConstantInt::getTrue(getLLVMContext());
969 }
970 }
971
972 // If we're in C++ mode and the function name is "main", it is guaranteed
973 // to be norecurse by the standard (3.6.1.3 "The function main shall not be
974 // used within a program").
975 //
976 // OpenCL C 2.0 v2.2-11 s6.9.i:
977 // Recursion is not supported.
978 //
979 // SYCL v1.2.1 s3.10:
980 // kernels cannot include RTTI information, exception classes,
981 // recursive code, virtual functions or make use of C++ libraries that
982 // are not compiled for the device.
983 if (FD && ((getLangOpts().CPlusPlus && FD->isMain()) ||
984 getLangOpts().OpenCL || getLangOpts().SYCLIsDevice ||
985 (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
986 Fn->addFnAttr(llvm::Attribute::NoRecurse);
987
988 llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
989 llvm::fp::ExceptionBehavior FPExceptionBehavior =
990 ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
991 Builder.setDefaultConstrainedRounding(RM);
992 Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
993 if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
994 (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
995 RM != llvm::RoundingMode::NearestTiesToEven))) {
996 Builder.setIsFPConstrained(true);
997 Fn->addFnAttr(llvm::Attribute::StrictFP);
998 }
999
1000 // If a custom alignment is used, force realigning to this alignment on
1001 // any main function which certainly will need it.
1002 if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
1003 CGM.getCodeGenOpts().StackAlignment))
1004 Fn->addFnAttr("stackrealign");
1005
1006 // "main" doesn't need to zero out call-used registers.
1007 if (FD && FD->isMain())
1008 Fn->removeFnAttr("zero-call-used-regs");
1009
1010 llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
1011
1012 // Create a marker to make it easy to insert allocas into the entryblock
1013 // later. Don't create this with the builder, because we don't want it
1014 // folded.
1015 llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
1016 AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
1017
1019
1020 Builder.SetInsertPoint(EntryBB);
1021
1022 // If we're checking the return value, allocate space for a pointer to a
1023 // precise source location of the checked return statement.
1024 if (requiresReturnValueCheck()) {
1025 ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
1026 Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
1027 ReturnLocation);
1028 }
1029
1030 // Emit subprogram debug descriptor.
1031 if (CGDebugInfo *DI = getDebugInfo()) {
1032 // Reconstruct the type from the argument list so that implicit parameters,
1033 // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
1034 // convention.
1035 DI->emitFunctionStart(GD, Loc, StartLoc,
1036 DI->getFunctionType(FD, RetTy, Args), CurFn,
1038 }
1039
1041 if (CGM.getCodeGenOpts().InstrumentFunctions)
1042 CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
1043 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
1044 CurFn->addFnAttr("instrument-function-entry-inlined",
1045 "__cyg_profile_func_enter");
1046 if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
1047 CurFn->addFnAttr("instrument-function-entry-inlined",
1048 "__cyg_profile_func_enter_bare");
1049 }
1050
1051 // Since emitting the mcount call here impacts optimizations such as function
1052 // inlining, we just add an attribute to insert a mcount call in backend.
1053 // The attribute "counting-function" is set to mcount function name which is
1054 // architecture dependent.
1055 if (CGM.getCodeGenOpts().InstrumentForProfiling) {
1056 // Calls to fentry/mcount should not be generated if function has
1057 // the no_instrument_function attribute.
1058 if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
1059 if (CGM.getCodeGenOpts().CallFEntry)
1060 Fn->addFnAttr("fentry-call", "true");
1061 else {
1062 Fn->addFnAttr("instrument-function-entry-inlined",
1063 getTarget().getMCountName());
1064 }
1065 if (CGM.getCodeGenOpts().MNopMCount) {
1066 if (!CGM.getCodeGenOpts().CallFEntry)
1067 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1068 << "-mnop-mcount" << "-mfentry";
1069 Fn->addFnAttr("mnop-mcount");
1070 }
1071
1072 if (CGM.getCodeGenOpts().RecordMCount) {
1073 if (!CGM.getCodeGenOpts().CallFEntry)
1074 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1075 << "-mrecord-mcount" << "-mfentry";
1076 Fn->addFnAttr("mrecord-mcount");
1077 }
1078 }
1079 }
1080
1081 if (CGM.getCodeGenOpts().PackedStack) {
1082 if (getContext().getTargetInfo().getTriple().getArch() !=
1083 llvm::Triple::systemz)
1084 CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1085 << "-mpacked-stack";
1086 Fn->addFnAttr("packed-stack");
1087 }
1088
1089 if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
1090 !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
1091 Fn->addFnAttr("warn-stack-size",
1092 std::to_string(CGM.getCodeGenOpts().WarnStackSize));
1093
1094 if (RetTy->isVoidType()) {
1095 // Void type; nothing to return.
1097
1098 // Count the implicit return.
1099 if (!endsWithReturn(D))
1100 ++NumReturnExprs;
1102 // Indirect return; emit returned value directly into sret slot.
1103 // This reduces code size, and affects correctness in C++.
1104 auto AI = CurFn->arg_begin();
1106 ++AI;
1107 ReturnValue =
1108 Address(&*AI, ConvertType(RetTy),
1116 }
1119 // Load the sret pointer from the argument struct and return into that.
1120 unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1121 llvm::Function::arg_iterator EI = CurFn->arg_end();
1122 --EI;
1123 llvm::Value *Addr = Builder.CreateStructGEP(
1124 CurFnInfo->getArgStruct(), &*EI, Idx);
1125 llvm::Type *Ty =
1126 cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
1128 Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
1129 ReturnValue = Address(Addr, ConvertType(RetTy),
1131 } else {
1132 ReturnValue = CreateIRTemp(RetTy, "retval");
1133
1134 // Tell the epilog emitter to autorelease the result. We do this
1135 // now so that various specialized functions can suppress it
1136 // during their IR-generation.
1137 if (getLangOpts().ObjCAutoRefCount &&
1139 RetTy->isObjCRetainableType())
1140 AutoreleaseResult = true;
1141 }
1142
1144
1146
1147 // Emit OpenMP specific initialization of the device functions.
1148 if (getLangOpts().OpenMP && CurCodeDecl)
1150
1151 // Handle emitting HLSL entry functions.
1152 if (D && D->hasAttr<HLSLShaderAttr>())
1154
1156
1157 if (isa_and_nonnull<CXXMethodDecl>(D) &&
1158 cast<CXXMethodDecl>(D)->isInstance()) {
1160 const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
1161 if (MD->getParent()->isLambda() &&
1162 MD->getOverloadedOperator() == OO_Call) {
1163 // We're in a lambda; figure out the captures.
1167 // If the lambda captures the object referred to by '*this' - either by
1168 // value or by reference, make sure CXXThisValue points to the correct
1169 // object.
1170
1171 // Get the lvalue for the field (which is a copy of the enclosing object
1172 // or contains the address of the enclosing object).
1175 // If the enclosing object was captured by value, just use its address.
1176 CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
1177 } else {
1178 // Load the lvalue pointed to by the field, since '*this' was captured
1179 // by reference.
1180 CXXThisValue =
1181 EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1182 }
1183 }
1184 for (auto *FD : MD->getParent()->fields()) {
1185 if (FD->hasCapturedVLAType()) {
1186 auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1188 auto VAT = FD->getCapturedVLAType();
1189 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1190 }
1191 }
1192 } else {
1193 // Not in a lambda; just use 'this' from the method.
1194 // FIXME: Should we generate a new load for each use of 'this'? The
1195 // fast register allocator would be happier...
1196 CXXThisValue = CXXABIThisValue;
1197 }
1198
1199 // Check the 'this' pointer once per function, if it's available.
1200 if (CXXABIThisValue) {
1201 SanitizerSet SkippedChecks;
1202 SkippedChecks.set(SanitizerKind::ObjectSize, true);
1203 QualType ThisTy = MD->getThisType();
1204
1205 // If this is the call operator of a lambda with no capture-default, it
1206 // may have a static invoker function, which may call this operator with
1207 // a null 'this' pointer.
1208 if (isLambdaCallOperator(MD) &&
1210 SkippedChecks.set(SanitizerKind::Null, true);
1211
1213 isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
1214 Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
1215 }
1216 }
1217
1218 // If any of the arguments have a variably modified type, make sure to
1219 // emit the type size, but only if the function is not naked. Naked functions
1220 // have no prolog to run this evaluation.
1221 if (!FD || !FD->hasAttr<NakedAttr>()) {
1222 for (const VarDecl *VD : Args) {
1223 // Dig out the type as written from ParmVarDecls; it's unclear whether
1224 // the standard (C99 6.9.1p10) requires this, but we're following the
1225 // precedent set by gcc.
1226 QualType Ty;
1227 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1228 Ty = PVD->getOriginalType();
1229 else
1230 Ty = VD->getType();
1231
1232 if (Ty->isVariablyModifiedType())
1234 }
1235 }
1236 // Emit a location at the end of the prologue.
1237 if (CGDebugInfo *DI = getDebugInfo())
1238 DI->EmitLocation(Builder, StartLoc);
1239 // TODO: Do we need to handle this in two places like we do with
1240 // target-features/target-cpu?
1241 if (CurFuncDecl)
1242 if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1243 LargestVectorWidth = VecWidth->getVectorWidth();
1244}
1245
1246void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1248 if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1250 else
1251 EmitStmt(Body);
1252
1253 // This is checked after emitting the function body so we know if there
1254 // are any permitted infinite loops.
1256 CurFn->addFnAttr(llvm::Attribute::MustProgress);
1257}
1258
1259/// When instrumenting to collect profile data, the counts for some blocks
1260/// such as switch cases need to not include the fall-through counts, so
1261/// emit a branch around the instrumentation code. When not instrumenting,
1262/// this just calls EmitBlock().
1263void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1264 const Stmt *S) {
1265 llvm::BasicBlock *SkipCountBB = nullptr;
1267 // When instrumenting for profiling, the fallthrough to certain
1268 // statements needs to skip over the instrumentation code so that we
1269 // get an accurate count.
1270 SkipCountBB = createBasicBlock("skipcount");
1271 EmitBranch(SkipCountBB);
1272 }
1273 EmitBlock(BB);
1274 uint64_t CurrentCount = getCurrentProfileCount();
1277 if (SkipCountBB)
1278 EmitBlock(SkipCountBB);
1279}
1280
1281/// Tries to mark the given function nounwind based on the
1282/// non-existence of any throwing calls within it. We believe this is
1283/// lightweight enough to do at -O0.
1284static void TryMarkNoThrow(llvm::Function *F) {
1285 // LLVM treats 'nounwind' on a function as part of the type, so we
1286 // can't do this on functions that can be overwritten.
1287 if (F->isInterposable()) return;
1288
1289 for (llvm::BasicBlock &BB : *F)
1290 for (llvm::Instruction &I : BB)
1291 if (I.mayThrow())
1292 return;
1293
1294 F->setDoesNotThrow();
1295}
1296
1298 FunctionArgList &Args) {
1299 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1300 QualType ResTy = FD->getReturnType();
1301
1302 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1303 if (MD && MD->isInstance()) {
1304 if (CGM.getCXXABI().HasThisReturn(GD))
1305 ResTy = MD->getThisType();
1306 else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1307 ResTy = CGM.getContext().VoidPtrTy;
1308 CGM.getCXXABI().buildThisParam(*this, Args);
1309 }
1310
1311 // The base version of an inheriting constructor whose constructed base is a
1312 // virtual base is not passed any arguments (because it doesn't actually call
1313 // the inherited constructor).
1314 bool PassedParams = true;
1315 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1316 if (auto Inherited = CD->getInheritedConstructor())
1317 PassedParams =
1318 getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1319
1320 if (PassedParams) {
1321 for (auto *Param : FD->parameters()) {
1322 Args.push_back(Param);
1323 if (!Param->hasAttr<PassObjectSizeAttr>())
1324 continue;
1325
1327 getContext(), Param->getDeclContext(), Param->getLocation(),
1328 /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1329 SizeArguments[Param] = Implicit;
1330 Args.push_back(Implicit);
1331 }
1332 }
1333
1334 if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1335 CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1336
1337 return ResTy;
1338}
1339
1340void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1341 const CGFunctionInfo &FnInfo) {
1342 assert(Fn && "generating code for null Function");
1343 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1344 CurGD = GD;
1345
1346 FunctionArgList Args;
1347 QualType ResTy = BuildFunctionArgList(GD, Args);
1348
1349 if (FD->isInlineBuiltinDeclaration()) {
1350 // When generating code for a builtin with an inline declaration, use a
1351 // mangled name to hold the actual body, while keeping an external
1352 // definition in case the function pointer is referenced somewhere.
1353 std::string FDInlineName = (Fn->getName() + ".inline").str();
1354 llvm::Module *M = Fn->getParent();
1355 llvm::Function *Clone = M->getFunction(FDInlineName);
1356 if (!Clone) {
1357 Clone = llvm::Function::Create(Fn->getFunctionType(),
1358 llvm::GlobalValue::InternalLinkage,
1359 Fn->getAddressSpace(), FDInlineName, M);
1360 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
1361 }
1362 Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
1363 Fn = Clone;
1364 } else {
1365 // Detect the unusual situation where an inline version is shadowed by a
1366 // non-inline version. In that case we should pick the external one
1367 // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
1368 // to detect that situation before we reach codegen, so do some late
1369 // replacement.
1370 for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
1371 PD = PD->getPreviousDecl()) {
1372 if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
1373 std::string FDInlineName = (Fn->getName() + ".inline").str();
1374 llvm::Module *M = Fn->getParent();
1375 if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
1376 Clone->replaceAllUsesWith(Fn);
1377 Clone->eraseFromParent();
1378 }
1379 break;
1380 }
1381 }
1382 }
1383
1384 // Check if we should generate debug info for this function.
1385 if (FD->hasAttr<NoDebugAttr>()) {
1386 // Clear non-distinct debug info that was possibly attached to the function
1387 // due to an earlier declaration without the nodebug attribute
1388 Fn->setSubprogram(nullptr);
1389 // Disable debug info indefinitely for this function
1390 DebugInfo = nullptr;
1391 }
1392
1393 // The function might not have a body if we're generating thunks for a
1394 // function declaration.
1395 SourceRange BodyRange;
1396 if (Stmt *Body = FD->getBody())
1397 BodyRange = Body->getSourceRange();
1398 else
1399 BodyRange = FD->getLocation();
1400 CurEHLocation = BodyRange.getEnd();
1401
1402 // Use the location of the start of the function to determine where
1403 // the function definition is located. By default use the location
1404 // of the declaration as the location for the subprogram. A function
1405 // may lack a declaration in the source code if it is created by code
1406 // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1407 SourceLocation Loc = FD->getLocation();
1408
1409 // If this is a function specialization then use the pattern body
1410 // as the location for the function.
1411 if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1412 if (SpecDecl->hasBody(SpecDecl))
1413 Loc = SpecDecl->getLocation();
1414
1415 Stmt *Body = FD->getBody();
1416
1417 if (Body) {
1418 // Coroutines always emit lifetime markers.
1419 if (isa<CoroutineBodyStmt>(Body))
1420 ShouldEmitLifetimeMarkers = true;
1421
1422 // Initialize helper which will detect jumps which can cause invalid
1423 // lifetime markers.
1424 if (ShouldEmitLifetimeMarkers)
1425 Bypasses.Init(Body);
1426 }
1427
1428 // Emit the standard function prologue.
1429 StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1430
1431 // Save parameters for coroutine function.
1432 if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
1433 llvm::append_range(FnArgs, FD->parameters());
1434
1435 // Generate the body of the function.
1436 PGO.assignRegionCounters(GD, CurFn);
1437 if (isa<CXXDestructorDecl>(FD))
1438 EmitDestructorBody(Args);
1439 else if (isa<CXXConstructorDecl>(FD))
1440 EmitConstructorBody(Args);
1441 else if (getLangOpts().CUDA &&
1442 !getLangOpts().CUDAIsDevice &&
1443 FD->hasAttr<CUDAGlobalAttr>())
1444 CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1445 else if (isa<CXXMethodDecl>(FD) &&
1446 cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1447 // The lambda static invoker function is special, because it forwards or
1448 // clones the body of the function call operator (but is actually static).
1449 EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1450 } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1451 (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1452 cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1453 // Implicit copy-assignment gets the same special treatment as implicit
1454 // copy-constructors.
1456 } else if (Body) {
1457 EmitFunctionBody(Body);
1458 } else
1459 llvm_unreachable("no definition for emitted function");
1460
1461 // C++11 [stmt.return]p2:
1462 // Flowing off the end of a function [...] results in undefined behavior in
1463 // a value-returning function.
1464 // C11 6.9.1p12:
1465 // If the '}' that terminates a function is reached, and the value of the
1466 // function call is used by the caller, the behavior is undefined.
1468 !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1469 bool ShouldEmitUnreachable =
1470 CGM.getCodeGenOpts().StrictReturn ||
1472 if (SanOpts.has(SanitizerKind::Return)) {
1473 SanitizerScope SanScope(this);
1474 llvm::Value *IsFalse = Builder.getFalse();
1475 EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1476 SanitizerHandler::MissingReturn,
1477 EmitCheckSourceLocation(FD->getLocation()), std::nullopt);
1478 } else if (ShouldEmitUnreachable) {
1479 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1480 EmitTrapCall(llvm::Intrinsic::trap);
1481 }
1482 if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1483 Builder.CreateUnreachable();
1484 Builder.ClearInsertionPoint();
1485 }
1486 }
1487
1488 // Emit the standard function epilogue.
1489 FinishFunction(BodyRange.getEnd());
1490
1491 // If we haven't marked the function nothrow through other means, do
1492 // a quick pass now to see if we can.
1493 if (!CurFn->doesNotThrow())
1495}
1496
1497/// ContainsLabel - Return true if the statement contains a label in it. If
1498/// this statement is not executed normally, it not containing a label means
1499/// that we can just remove the code.
1500bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1501 // Null statement, not a label!
1502 if (!S) return false;
1503
1504 // If this is a label, we have to emit the code, consider something like:
1505 // if (0) { ... foo: bar(); } goto foo;
1506 //
1507 // TODO: If anyone cared, we could track __label__'s, since we know that you
1508 // can't jump to one from outside their declared region.
1509 if (isa<LabelStmt>(S))
1510 return true;
1511
1512 // If this is a case/default statement, and we haven't seen a switch, we have
1513 // to emit the code.
1514 if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1515 return true;
1516
1517 // If this is a switch statement, we want to ignore cases below it.
1518 if (isa<SwitchStmt>(S))
1519 IgnoreCaseStmts = true;
1520
1521 // Scan subexpressions for verboten labels.
1522 for (const Stmt *SubStmt : S->children())
1523 if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1524 return true;
1525
1526 return false;
1527}
1528
1529/// containsBreak - Return true if the statement contains a break out of it.
1530/// If the statement (recursively) contains a switch or loop with a break
1531/// inside of it, this is fine.
1532bool CodeGenFunction::containsBreak(const Stmt *S) {
1533 // Null statement, not a label!
1534 if (!S) return false;
1535
1536 // If this is a switch or loop that defines its own break scope, then we can
1537 // include it and anything inside of it.
1538 if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1539 isa<ForStmt>(S))
1540 return false;
1541
1542 if (isa<BreakStmt>(S))
1543 return true;
1544
1545 // Scan subexpressions for verboten breaks.
1546 for (const Stmt *SubStmt : S->children())
1547 if (containsBreak(SubStmt))
1548 return true;
1549
1550 return false;
1551}
1552
1554 if (!S) return false;
1555
1556 // Some statement kinds add a scope and thus never add a decl to the current
1557 // scope. Note, this list is longer than the list of statements that might
1558 // have an unscoped decl nested within them, but this way is conservatively
1559 // correct even if more statement kinds are added.
1560 if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1561 isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1562 isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1563 isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1564 return false;
1565
1566 if (isa<DeclStmt>(S))
1567 return true;
1568
1569 for (const Stmt *SubStmt : S->children())
1570 if (mightAddDeclToScope(SubStmt))
1571 return true;
1572
1573 return false;
1574}
1575
1576/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1577/// to a constant, or if it does but contains a label, return false. If it
1578/// constant folds return true and set the boolean result in Result.
1580 bool &ResultBool,
1581 bool AllowLabels) {
1582 llvm::APSInt ResultInt;
1583 if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1584 return false;
1585
1586 ResultBool = ResultInt.getBoolValue();
1587 return true;
1588}
1589
1590/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1591/// to a constant, or if it does but contains a label, return false. If it
1592/// constant folds return true and set the folded value.
1594 llvm::APSInt &ResultInt,
1595 bool AllowLabels) {
1596 // FIXME: Rename and handle conversion of other evaluatable things
1597 // to bool.
1599 if (!Cond->EvaluateAsInt(Result, getContext()))
1600 return false; // Not foldable, not integer or not fully evaluatable.
1601
1602 llvm::APSInt Int = Result.Val.getInt();
1603 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1604 return false; // Contains a label.
1605
1606 ResultInt = Int;
1607 return true;
1608}
1609
1610/// Determine whether the given condition is an instrumentable condition
1611/// (i.e. no "&&" or "||").
1613 // Bypass simplistic logical-NOT operator before determining whether the
1614 // condition contains any other logical operator.
1615 if (const UnaryOperator *UnOp = dyn_cast<UnaryOperator>(C->IgnoreParens()))
1616 if (UnOp->getOpcode() == UO_LNot)
1617 C = UnOp->getSubExpr();
1618
1619 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(C->IgnoreParens());
1620 return (!BOp || !BOp->isLogicalOp());
1621}
1622
1623/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1624/// increments a profile counter based on the semantics of the given logical
1625/// operator opcode. This is used to instrument branch condition coverage for
1626/// logical operators.
1628 const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1629 llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1630 Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1631 // If not instrumenting, just emit a branch.
1632 bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1633 if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1634 return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1635
1636 llvm::BasicBlock *ThenBlock = nullptr;
1637 llvm::BasicBlock *ElseBlock = nullptr;
1638 llvm::BasicBlock *NextBlock = nullptr;
1639
1640 // Create the block we'll use to increment the appropriate counter.
1641 llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1642
1643 // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1644 // means we need to evaluate the condition and increment the counter on TRUE:
1645 //
1646 // if (Cond)
1647 // goto CounterIncrBlock;
1648 // else
1649 // goto FalseBlock;
1650 //
1651 // CounterIncrBlock:
1652 // Counter++;
1653 // goto TrueBlock;
1654
1655 if (LOp == BO_LAnd) {
1656 ThenBlock = CounterIncrBlock;
1657 ElseBlock = FalseBlock;
1658 NextBlock = TrueBlock;
1659 }
1660
1661 // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1662 // we need to evaluate the condition and increment the counter on FALSE:
1663 //
1664 // if (Cond)
1665 // goto TrueBlock;
1666 // else
1667 // goto CounterIncrBlock;
1668 //
1669 // CounterIncrBlock:
1670 // Counter++;
1671 // goto FalseBlock;
1672
1673 else if (LOp == BO_LOr) {
1674 ThenBlock = TrueBlock;
1675 ElseBlock = CounterIncrBlock;
1676 NextBlock = FalseBlock;
1677 } else {
1678 llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1679 }
1680
1681 // Emit Branch based on condition.
1682 EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1683
1684 // Emit the block containing the counter increment(s).
1685 EmitBlock(CounterIncrBlock);
1686
1687 // Increment corresponding counter; if index not provided, use Cond as index.
1688 incrementProfileCounter(CntrIdx ? CntrIdx : Cond);
1689
1690 // Go to the next block.
1691 EmitBranch(NextBlock);
1692}
1693
1694/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1695/// statement) to the specified blocks. Based on the condition, this might try
1696/// to simplify the codegen of the conditional based on the branch.
1697/// \param LH The value of the likelihood attribute on the True branch.
1699 llvm::BasicBlock *TrueBlock,
1700 llvm::BasicBlock *FalseBlock,
1701 uint64_t TrueCount,
1702 Stmt::Likelihood LH) {
1703 Cond = Cond->IgnoreParens();
1704
1705 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1706
1707 // Handle X && Y in a condition.
1708 if (CondBOp->getOpcode() == BO_LAnd) {
1709 // If we have "1 && X", simplify the code. "0 && X" would have constant
1710 // folded if the case was simple enough.
1711 bool ConstantBool = false;
1712 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1713 ConstantBool) {
1714 // br(1 && X) -> br(X).
1715 incrementProfileCounter(CondBOp);
1716 return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1717 FalseBlock, TrueCount, LH);
1718 }
1719
1720 // If we have "X && 1", simplify the code to use an uncond branch.
1721 // "X && 0" would have been constant folded to 0.
1722 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1723 ConstantBool) {
1724 // br(X && 1) -> br(X).
1725 return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1726 FalseBlock, TrueCount, LH, CondBOp);
1727 }
1728
1729 // Emit the LHS as a conditional. If the LHS conditional is false, we
1730 // want to jump to the FalseBlock.
1731 llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1732 // The counter tells us how often we evaluate RHS, and all of TrueCount
1733 // can be propagated to that branch.
1734 uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1735
1736 ConditionalEvaluation eval(*this);
1737 {
1738 ApplyDebugLocation DL(*this, Cond);
1739 // Propagate the likelihood attribute like __builtin_expect
1740 // __builtin_expect(X && Y, 1) -> X and Y are likely
1741 // __builtin_expect(X && Y, 0) -> only Y is unlikely
1742 EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1743 LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1744 EmitBlock(LHSTrue);
1745 }
1746
1747 incrementProfileCounter(CondBOp);
1748 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1749
1750 // Any temporaries created here are conditional.
1751 eval.begin(*this);
1752 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1753 FalseBlock, TrueCount, LH);
1754 eval.end(*this);
1755
1756 return;
1757 }
1758
1759 if (CondBOp->getOpcode() == BO_LOr) {
1760 // If we have "0 || X", simplify the code. "1 || X" would have constant
1761 // folded if the case was simple enough.
1762 bool ConstantBool = false;
1763 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1764 !ConstantBool) {
1765 // br(0 || X) -> br(X).
1766 incrementProfileCounter(CondBOp);
1767 return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1768 FalseBlock, TrueCount, LH);
1769 }
1770
1771 // If we have "X || 0", simplify the code to use an uncond branch.
1772 // "X || 1" would have been constant folded to 1.
1773 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1774 !ConstantBool) {
1775 // br(X || 0) -> br(X).
1776 return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1777 FalseBlock, TrueCount, LH, CondBOp);
1778 }
1779
1780 // Emit the LHS as a conditional. If the LHS conditional is true, we
1781 // want to jump to the TrueBlock.
1782 llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1783 // We have the count for entry to the RHS and for the whole expression
1784 // being true, so we can divy up True count between the short circuit and
1785 // the RHS.
1786 uint64_t LHSCount =
1787 getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1788 uint64_t RHSCount = TrueCount - LHSCount;
1789
1790 ConditionalEvaluation eval(*this);
1791 {
1792 // Propagate the likelihood attribute like __builtin_expect
1793 // __builtin_expect(X || Y, 1) -> only Y is likely
1794 // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1795 ApplyDebugLocation DL(*this, Cond);
1796 EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1797 LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1798 EmitBlock(LHSFalse);
1799 }
1800
1801 incrementProfileCounter(CondBOp);
1802 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1803
1804 // Any temporaries created here are conditional.
1805 eval.begin(*this);
1806 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
1807 RHSCount, LH);
1808
1809 eval.end(*this);
1810
1811 return;
1812 }
1813 }
1814
1815 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1816 // br(!x, t, f) -> br(x, f, t)
1817 if (CondUOp->getOpcode() == UO_LNot) {
1818 // Negate the count.
1819 uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1820 // The values of the enum are chosen to make this negation possible.
1821 LH = static_cast<Stmt::Likelihood>(-LH);
1822 // Negate the condition and swap the destination blocks.
1823 return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1824 FalseCount, LH);
1825 }
1826 }
1827
1828 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1829 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1830 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1831 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1832
1833 // The ConditionalOperator itself has no likelihood information for its
1834 // true and false branches. This matches the behavior of __builtin_expect.
1835 ConditionalEvaluation cond(*this);
1836 EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1838
1839 // When computing PGO branch weights, we only know the overall count for
1840 // the true block. This code is essentially doing tail duplication of the
1841 // naive code-gen, introducing new edges for which counts are not
1842 // available. Divide the counts proportionally between the LHS and RHS of
1843 // the conditional operator.
1844 uint64_t LHSScaledTrueCount = 0;
1845 if (TrueCount) {
1846 double LHSRatio =
1848 LHSScaledTrueCount = TrueCount * LHSRatio;
1849 }
1850
1851 cond.begin(*this);
1852 EmitBlock(LHSBlock);
1854 {
1855 ApplyDebugLocation DL(*this, Cond);
1856 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1857 LHSScaledTrueCount, LH);
1858 }
1859 cond.end(*this);
1860
1861 cond.begin(*this);
1862 EmitBlock(RHSBlock);
1863 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1864 TrueCount - LHSScaledTrueCount, LH);
1865 cond.end(*this);
1866
1867 return;
1868 }
1869
1870 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1871 // Conditional operator handling can give us a throw expression as a
1872 // condition for a case like:
1873 // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
1874 // Fold this to:
1875 // br(c, throw x, br(y, t, f))
1876 EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1877 return;
1878 }
1879
1880 // Emit the code with the fully general case.
1881 llvm::Value *CondV;
1882 {
1883 ApplyDebugLocation DL(*this, Cond);
1884 CondV = EvaluateExprAsBool(Cond);
1885 }
1886
1887 llvm::MDNode *Weights = nullptr;
1888 llvm::MDNode *Unpredictable = nullptr;
1889
1890 // If the branch has a condition wrapped by __builtin_unpredictable,
1891 // create metadata that specifies that the branch is unpredictable.
1892 // Don't bother if not optimizing because that metadata would not be used.
1893 auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1894 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1895 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1896 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1897 llvm::MDBuilder MDHelper(getLLVMContext());
1898 Unpredictable = MDHelper.createUnpredictable();
1899 }
1900 }
1901
1902 // If there is a Likelihood knowledge for the cond, lower it.
1903 // Note that if not optimizing this won't emit anything.
1904 llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
1905 if (CondV != NewCondV)
1906 CondV = NewCondV;
1907 else {
1908 // Otherwise, lower profile counts. Note that we do this even at -O0.
1909 uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1910 Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
1911 }
1912
1913 Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1914}
1915
1916/// ErrorUnsupported - Print out an error that codegen doesn't support the
1917/// specified stmt yet.
1918void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1920}
1921
1922/// emitNonZeroVLAInit - Emit the "zero" initialization of a
1923/// variable-length array whose elements have a non-zero bit-pattern.
1924///
1925/// \param baseType the inner-most element type of the array
1926/// \param src - a char* pointing to the bit-pattern for a single
1927/// base element of the array
1928/// \param sizeInChars - the total size of the VLA, in chars
1930 Address dest, Address src,
1931 llvm::Value *sizeInChars) {
1933
1934 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1935 llvm::Value *baseSizeInChars
1936 = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1937
1938 Address begin =
1939 Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1940 llvm::Value *end = Builder.CreateInBoundsGEP(
1941 begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
1942
1943 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1944 llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1945 llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1946
1947 // Make a loop over the VLA. C99 guarantees that the VLA element
1948 // count must be nonzero.
1949 CGF.EmitBlock(loopBB);
1950
1951 llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1952 cur->addIncoming(begin.getPointer(), originBB);
1953
1954 CharUnits curAlign =
1955 dest.getAlignment().alignmentOfArrayElement(baseSize);
1956
1957 // memcpy the individual element bit-pattern.
1958 Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
1959 /*volatile*/ false);
1960
1961 // Go to the next element.
1962 llvm::Value *next =
1963 Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1964
1965 // Leave if that's the end of the VLA.
1966 llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1967 Builder.CreateCondBr(done, contBB, loopBB);
1968 cur->addIncoming(next, loopBB);
1969
1970 CGF.EmitBlock(contBB);
1971}
1972
1973void
1975 // Ignore empty classes in C++.
1976 if (getLangOpts().CPlusPlus) {
1977 if (const RecordType *RT = Ty->getAs<RecordType>()) {
1978 if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1979 return;
1980 }
1981 }
1982
1983 // Cast the dest ptr to the appropriate i8 pointer type.
1984 if (DestPtr.getElementType() != Int8Ty)
1985 DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1986
1987 // Get size and alignment info for this aggregate.
1989
1990 llvm::Value *SizeVal;
1991 const VariableArrayType *vla;
1992
1993 // Don't bother emitting a zero-byte memset.
1994 if (size.isZero()) {
1995 // But note that getTypeInfo returns 0 for a VLA.
1996 if (const VariableArrayType *vlaType =
1997 dyn_cast_or_null<VariableArrayType>(
1998 getContext().getAsArrayType(Ty))) {
1999 auto VlaSize = getVLASize(vlaType);
2000 SizeVal = VlaSize.NumElts;
2001 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2002 if (!eltSize.isOne())
2003 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
2004 vla = vlaType;
2005 } else {
2006 return;
2007 }
2008 } else {
2009 SizeVal = CGM.getSize(size);
2010 vla = nullptr;
2011 }
2012
2013 // If the type contains a pointer to data member we can't memset it to zero.
2014 // Instead, create a null constant and copy it to the destination.
2015 // TODO: there are other patterns besides zero that we can usefully memset,
2016 // like -1, which happens to be the pattern used by member-pointers.
2017 if (!CGM.getTypes().isZeroInitializable(Ty)) {
2018 // For a VLA, emit a single element, then splat that over the VLA.
2019 if (vla) Ty = getContext().getBaseElementType(vla);
2020
2021 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
2022
2023 llvm::GlobalVariable *NullVariable =
2024 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2025 /*isConstant=*/true,
2026 llvm::GlobalVariable::PrivateLinkage,
2027 NullConstant, Twine());
2028 CharUnits NullAlign = DestPtr.getAlignment();
2029 NullVariable->setAlignment(NullAlign.getAsAlign());
2030 Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
2031 Builder.getInt8Ty(), NullAlign);
2032
2033 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2034
2035 // Get and call the appropriate llvm.memcpy overload.
2036 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2037 return;
2038 }
2039
2040 // Otherwise, just memset the whole thing to zero. This is legal
2041 // because in LLVM, all default initializers (other than the ones we just
2042 // handled above) are guaranteed to have a bit pattern of all zeros.
2043 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2044}
2045
2046llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2047 // Make sure that there is a block for the indirect goto.
2048 if (!IndirectBranch)
2050
2051 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2052
2053 // Make sure the indirect branch includes all of the address-taken blocks.
2054 IndirectBranch->addDestination(BB);
2055 return llvm::BlockAddress::get(CurFn, BB);
2056}
2057
2058llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2059 // If we already made the indirect branch for indirect goto, return its block.
2060 if (IndirectBranch) return IndirectBranch->getParent();
2061
2062 CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
2063
2064 // Create the PHI node that indirect gotos will add entries to.
2065 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2066 "indirect.goto.dest");
2067
2068 // Create the indirect branch instruction.
2069 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2070 return IndirectBranch->getParent();
2071}
2072
2073/// Computes the length of an array in elements, as well as the base
2074/// element type and a properly-typed first element pointer.
2075llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2076 QualType &baseType,
2077 Address &addr) {
2078 const ArrayType *arrayType = origArrayType;
2079
2080 // If it's a VLA, we have to load the stored size. Note that
2081 // this is the size of the VLA in bytes, not its size in elements.
2082 llvm::Value *numVLAElements = nullptr;
2083 if (isa<VariableArrayType>(arrayType)) {
2084 numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
2085
2086 // Walk into all VLAs. This doesn't require changes to addr,
2087 // which has type T* where T is the first non-VLA element type.
2088 do {
2089 QualType elementType = arrayType->getElementType();
2090 arrayType = getContext().getAsArrayType(elementType);
2091
2092 // If we only have VLA components, 'addr' requires no adjustment.
2093 if (!arrayType) {
2094 baseType = elementType;
2095 return numVLAElements;
2096 }
2097 } while (isa<VariableArrayType>(arrayType));
2098
2099 // We get out here only if we find a constant array type
2100 // inside the VLA.
2101 }
2102
2103 // We have some number of constant-length arrays, so addr should
2104 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
2105 // down to the first element of addr.
2107
2108 // GEP down to the array type.
2109 llvm::ConstantInt *zero = Builder.getInt32(0);
2110 gepIndices.push_back(zero);
2111
2112 uint64_t countFromCLAs = 1;
2113 QualType eltType;
2114
2115 llvm::ArrayType *llvmArrayType =
2116 dyn_cast<llvm::ArrayType>(addr.getElementType());
2117 while (llvmArrayType) {
2118 assert(isa<ConstantArrayType>(arrayType));
2119 assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
2120 == llvmArrayType->getNumElements());
2121
2122 gepIndices.push_back(zero);
2123 countFromCLAs *= llvmArrayType->getNumElements();
2124 eltType = arrayType->getElementType();
2125
2126 llvmArrayType =
2127 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2128 arrayType = getContext().getAsArrayType(arrayType->getElementType());
2129 assert((!llvmArrayType || arrayType) &&
2130 "LLVM and Clang types are out-of-synch");
2131 }
2132
2133 if (arrayType) {
2134 // From this point onwards, the Clang array type has been emitted
2135 // as some other type (probably a packed struct). Compute the array
2136 // size, and just emit the 'begin' expression as a bitcast.
2137 while (arrayType) {
2138 countFromCLAs *=
2139 cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
2140 eltType = arrayType->getElementType();
2141 arrayType = getContext().getAsArrayType(eltType);
2142 }
2143
2144 llvm::Type *baseType = ConvertType(eltType);
2145 addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
2146 } else {
2147 // Create the actual GEP.
2148 addr = Address(Builder.CreateInBoundsGEP(
2149 addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
2150 ConvertTypeForMem(eltType),
2151 addr.getAlignment());
2152 }
2153
2154 baseType = eltType;
2155
2156 llvm::Value *numElements
2157 = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2158
2159 // If we had any VLA dimensions, factor them in.
2160 if (numVLAElements)
2161 numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2162
2163 return numElements;
2164}
2165
2166CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2168 assert(vla && "type was not a variable array type!");
2169 return getVLASize(vla);
2170}
2171
2172CodeGenFunction::VlaSizePair
2174 // The number of elements so far; always size_t.
2175 llvm::Value *numElements = nullptr;
2176
2177 QualType elementType;
2178 do {
2179 elementType = type->getElementType();
2180 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2181 assert(vlaSize && "no size for VLA!");
2182 assert(vlaSize->getType() == SizeTy);
2183
2184 if (!numElements) {
2185 numElements = vlaSize;
2186 } else {
2187 // It's undefined behavior if this wraps around, so mark it that way.
2188 // FIXME: Teach -fsanitize=undefined to trap this.
2189 numElements = Builder.CreateNUWMul(numElements, vlaSize);
2190 }
2191 } while ((type = getContext().getAsVariableArrayType(elementType)));
2192
2193 return { numElements, elementType };
2194}
2195
2196CodeGenFunction::VlaSizePair
2199 assert(vla && "type was not a variable array type!");
2200 return getVLAElements1D(vla);
2201}
2202
2203CodeGenFunction::VlaSizePair
2205 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2206 assert(VlaSize && "no size for VLA!");
2207 assert(VlaSize->getType() == SizeTy);
2208 return { VlaSize, Vla->getElementType() };
2209}
2210
2212 assert(type->isVariablyModifiedType() &&
2213 "Must pass variably modified type to EmitVLASizes!");
2214
2216
2217 // We're going to walk down into the type and look for VLA
2218 // expressions.
2219 do {
2220 assert(type->isVariablyModifiedType());
2221
2222 const Type *ty = type.getTypePtr();
2223 switch (ty->getTypeClass()) {
2224
2225#define TYPE(Class, Base)
2226#define ABSTRACT_TYPE(Class, Base)
2227#define NON_CANONICAL_TYPE(Class, Base)
2228#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2229#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2230#include "clang/AST/TypeNodes.inc"
2231 llvm_unreachable("unexpected dependent type!");
2232
2233 // These types are never variably-modified.
2234 case Type::Builtin:
2235 case Type::Complex:
2236 case Type::Vector:
2237 case Type::ExtVector:
2238 case Type::ConstantMatrix:
2239 case Type::Record:
2240 case Type::Enum:
2241 case Type::Using:
2242 case Type::TemplateSpecialization:
2243 case Type::ObjCTypeParam:
2244 case Type::ObjCObject:
2245 case Type::ObjCInterface:
2246 case Type::ObjCObjectPointer:
2247 case Type::BitInt:
2248 llvm_unreachable("type class is never variably-modified!");
2249
2250 case Type::Elaborated:
2251 type = cast<ElaboratedType>(ty)->getNamedType();
2252 break;
2253
2254 case Type::Adjusted:
2255 type = cast<AdjustedType>(ty)->getAdjustedType();
2256 break;
2257
2258 case Type::Decayed:
2259 type = cast<DecayedType>(ty)->getPointeeType();
2260 break;
2261
2262 case Type::Pointer:
2263 type = cast<PointerType>(ty)->getPointeeType();
2264 break;
2265
2266 case Type::BlockPointer:
2267 type = cast<BlockPointerType>(ty)->getPointeeType();
2268 break;
2269
2270 case Type::LValueReference:
2271 case Type::RValueReference:
2272 type = cast<ReferenceType>(ty)->getPointeeType();
2273 break;
2274
2275 case Type::MemberPointer:
2276 type = cast<MemberPointerType>(ty)->getPointeeType();
2277 break;
2278
2279 case Type::ConstantArray:
2280 case Type::IncompleteArray:
2281 // Losing element qualification here is fine.
2282 type = cast<ArrayType>(ty)->getElementType();
2283 break;
2284
2285 case Type::VariableArray: {
2286 // Losing element qualification here is fine.
2287 const VariableArrayType *vat = cast<VariableArrayType>(ty);
2288
2289 // Unknown size indication requires no size computation.
2290 // Otherwise, evaluate and record it.
2291 if (const Expr *sizeExpr = vat->getSizeExpr()) {
2292 // It's possible that we might have emitted this already,
2293 // e.g. with a typedef and a pointer to it.
2294 llvm::Value *&entry = VLASizeMap[sizeExpr];
2295 if (!entry) {
2296 llvm::Value *size = EmitScalarExpr(sizeExpr);
2297
2298 // C11 6.7.6.2p5:
2299 // If the size is an expression that is not an integer constant
2300 // expression [...] each time it is evaluated it shall have a value
2301 // greater than zero.
2302 if (SanOpts.has(SanitizerKind::VLABound)) {
2303 SanitizerScope SanScope(this);
2304 llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2305 clang::QualType SEType = sizeExpr->getType();
2306 llvm::Value *CheckCondition =
2307 SEType->isSignedIntegerType()
2308 ? Builder.CreateICmpSGT(size, Zero)
2309 : Builder.CreateICmpUGT(size, Zero);
2310 llvm::Constant *StaticArgs[] = {
2311 EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2312 EmitCheckTypeDescriptor(SEType)};
2313 EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound),
2314 SanitizerHandler::VLABoundNotPositive, StaticArgs, size);
2315 }
2316
2317 // Always zexting here would be wrong if it weren't
2318 // undefined behavior to have a negative bound.
2319 // FIXME: What about when size's type is larger than size_t?
2320 entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2321 }
2322 }
2323 type = vat->getElementType();
2324 break;
2325 }
2326
2327 case Type::FunctionProto:
2328 case Type::FunctionNoProto:
2329 type = cast<FunctionType>(ty)->getReturnType();
2330 break;
2331
2332 case Type::Paren:
2333 case Type::TypeOf:
2334 case Type::UnaryTransform:
2335 case Type::Attributed:
2336 case Type::BTFTagAttributed:
2337 case Type::SubstTemplateTypeParm:
2338 case Type::MacroQualified:
2339 // Keep walking after single level desugaring.
2340 type = type.getSingleStepDesugaredType(getContext());
2341 break;
2342
2343 case Type::Typedef:
2344 case Type::Decltype:
2345 case Type::Auto:
2346 case Type::DeducedTemplateSpecialization:
2347 // Stop walking: nothing to do.
2348 return;
2349
2350 case Type::TypeOfExpr:
2351 // Stop walking: emit typeof expression.
2352 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2353 return;
2354
2355 case Type::Atomic:
2356 type = cast<AtomicType>(ty)->getValueType();
2357 break;
2358
2359 case Type::Pipe:
2360 type = cast<PipeType>(ty)->getElementType();
2361 break;
2362 }
2363 } while (type->isVariablyModifiedType());
2364}
2365
2367 if (getContext().getBuiltinVaListType()->isArrayType())
2368 return EmitPointerWithAlignment(E);
2369 return EmitLValue(E).getAddress(*this);
2370}
2371
2373 return EmitLValue(E).getAddress(*this);
2374}
2375
2377 const APValue &Init) {
2378 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2379 if (CGDebugInfo *Dbg = getDebugInfo())
2381 Dbg->EmitGlobalVariable(E->getDecl(), Init);
2382}
2383
2384CodeGenFunction::PeepholeProtection
2386 // At the moment, the only aggressive peephole we do in IR gen
2387 // is trunc(zext) folding, but if we add more, we can easily
2388 // extend this protection.
2389
2390 if (!rvalue.isScalar()) return PeepholeProtection();
2391 llvm::Value *value = rvalue.getScalarVal();
2392 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2393
2394 // Just make an extra bitcast.
2395 assert(HaveInsertPoint());
2396 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2397 Builder.GetInsertBlock());
2398
2399 PeepholeProtection protection;
2400 protection.Inst = inst;
2401 return protection;
2402}
2403
2404void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2405 if (!protection.Inst) return;
2406
2407 // In theory, we could try to duplicate the peepholes now, but whatever.
2408 protection.Inst->eraseFromParent();
2409}
2410
2411void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2412 QualType Ty, SourceLocation Loc,
2413 SourceLocation AssumptionLoc,
2414 llvm::Value *Alignment,
2415 llvm::Value *OffsetValue) {
2416 if (Alignment->getType() != IntPtrTy)
2417 Alignment =
2418 Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2419 if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2420 OffsetValue =
2421 Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2422 llvm::Value *TheCheck = nullptr;
2423 if (SanOpts.has(SanitizerKind::Alignment)) {
2424 llvm::Value *PtrIntValue =
2425 Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2426
2427 if (OffsetValue) {
2428 bool IsOffsetZero = false;
2429 if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2430 IsOffsetZero = CI->isZero();
2431
2432 if (!IsOffsetZero)
2433 PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2434 }
2435
2436 llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2437 llvm::Value *Mask =
2438 Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2439 llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2440 TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2441 }
2442 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2443 CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2444
2445 if (!SanOpts.has(SanitizerKind::Alignment))
2446 return;
2447 emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2448 OffsetValue, TheCheck, Assumption);
2449}
2450
2451void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2452 const Expr *E,
2453 SourceLocation AssumptionLoc,
2454 llvm::Value *Alignment,
2455 llvm::Value *OffsetValue) {
2456 QualType Ty = E->getType();
2457 SourceLocation Loc = E->getExprLoc();
2458
2459 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2460 OffsetValue);
2461}
2462
2463llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2464 llvm::Value *AnnotatedVal,
2465 StringRef AnnotationStr,
2466 SourceLocation Location,
2467 const AnnotateAttr *Attr) {
2469 AnnotatedVal,
2470 Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr),
2472 Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location),
2474 CGM.EmitAnnotationLineNo(Location),
2475 };
2476 if (Attr)
2477 Args.push_back(CGM.EmitAnnotationArgs(Attr));
2478 return Builder.CreateCall(AnnotationFn, Args);
2479}
2480
2481void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2482 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2483 // FIXME We create a new bitcast for every annotation because that's what
2484 // llvm-gcc was doing.
2485 unsigned AS = V->getType()->getPointerAddressSpace();
2486 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(AS);
2487 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2488 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2489 {I8PtrTy, CGM.ConstGlobalsPtrTy}),
2490 Builder.CreateBitCast(V, I8PtrTy, V->getName()),
2491 I->getAnnotation(), D->getLocation(), I);
2492}
2493
2495 Address Addr) {
2496 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2497 llvm::Value *V = Addr.getPointer();
2498 llvm::Type *VTy = V->getType();
2499 auto *PTy = dyn_cast<llvm::PointerType>(VTy);
2500 unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2501 llvm::PointerType *IntrinTy =
2502 llvm::PointerType::getWithSamePointeeType(CGM.Int8PtrTy, AS);
2503 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2504 {IntrinTy, CGM.ConstGlobalsPtrTy});
2505
2506 for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2507 // FIXME Always emit the cast inst so we can differentiate between
2508 // annotation on the first field of a struct and annotation on the struct
2509 // itself.
2510 if (VTy != IntrinTy)
2511 V = Builder.CreateBitCast(V, IntrinTy);
2512 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2513 V = Builder.CreateBitCast(V, VTy);
2514 }
2515
2516 return Address(V, Addr.getElementType(), Addr.getAlignment());
2517}
2518
2520
2522 : CGF(CGF) {
2523 assert(!CGF->IsSanitizerScope);
2524 CGF->IsSanitizerScope = true;
2525}
2526
2528 CGF->IsSanitizerScope = false;
2529}
2530
2531void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2532 const llvm::Twine &Name,
2533 llvm::BasicBlock *BB,
2534 llvm::BasicBlock::iterator InsertPt) const {
2536 if (IsSanitizerScope)
2538}
2539
2541 llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2542 llvm::BasicBlock::iterator InsertPt) const {
2543 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2544 if (CGF)
2545 CGF->InsertHelper(I, Name, BB, InsertPt);
2546}
2547
2548// Emits an error if we don't have a valid set of target features for the
2549// called function.
2551 const FunctionDecl *TargetDecl) {
2552 return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2553}
2554
2555// Emits an error if we don't have a valid set of target features for the
2556// called function.
2558 const FunctionDecl *TargetDecl) {
2559 // Early exit if this is an indirect call.
2560 if (!TargetDecl)
2561 return;
2562
2563 // Get the current enclosing function if it exists. If it doesn't
2564 // we can't check the target features anyhow.
2565 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2566 if (!FD)
2567 return;
2568
2569 // Grab the required features for the call. For a builtin this is listed in
2570 // the td file with the default cpu, for an always_inline function this is any
2571 // listed cpu and any listed features.
2572 unsigned BuiltinID = TargetDecl->getBuiltinID();
2573 std::string MissingFeature;
2574 llvm::StringMap<bool> CallerFeatureMap;
2575 CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2576 if (BuiltinID) {
2577 StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
2579 FeatureList, CallerFeatureMap)) {
2580 CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2581 << TargetDecl->getDeclName()
2582 << FeatureList;
2583 }
2584 } else if (!TargetDecl->isMultiVersion() &&
2585 TargetDecl->hasAttr<TargetAttr>()) {
2586 // Get the required features for the callee.
2587
2588 const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2591
2592 SmallVector<StringRef, 1> ReqFeatures;
2593 llvm::StringMap<bool> CalleeFeatureMap;
2594 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2595
2596 for (const auto &F : ParsedAttr.Features) {
2597 if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2598 ReqFeatures.push_back(StringRef(F).substr(1));
2599 }
2600
2601 for (const auto &F : CalleeFeatureMap) {
2602 // Only positive features are "required".
2603 if (F.getValue())
2604 ReqFeatures.push_back(F.getKey());
2605 }
2606 if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
2607 if (!CallerFeatureMap.lookup(Feature)) {
2608 MissingFeature = Feature.str();
2609 return false;
2610 }
2611 return true;
2612 }))
2613 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2614 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2615 } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
2616 llvm::StringMap<bool> CalleeFeatureMap;
2617 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2618
2619 for (const auto &F : CalleeFeatureMap) {
2620 if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) ||
2621 !CallerFeatureMap.find(F.getKey())->getValue()))
2622 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2623 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2624 }
2625 }
2626}
2627
2628void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2629 if (!CGM.getCodeGenOpts().SanitizeStats)
2630 return;
2631
2632 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2633 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2634 CGM.getSanStats().create(IRB, SSK);
2635}
2636
2638 const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2639 const FunctionProtoType *FP =
2640 Callee.getAbstractInfo().getCalleeFunctionProtoType();
2641 if (FP)
2642 Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar()));
2643}
2644
2645llvm::Value *CodeGenFunction::FormAArch64ResolverCondition(
2646 const MultiVersionResolverOption &RO) {
2648 for (const StringRef &Feature : RO.Conditions.Features) {
2649 // Form condition for features which are not yet enabled in target
2650 if (!getContext().getTargetInfo().hasFeature(Feature))
2651 CondFeatures.push_back(Feature);
2652 }
2653 if (!CondFeatures.empty()) {
2654 return EmitAArch64CpuSupports(CondFeatures);
2655 }
2656 return nullptr;
2657}
2658
2659llvm::Value *CodeGenFunction::FormX86ResolverCondition(
2660 const MultiVersionResolverOption &RO) {
2661 llvm::Value *Condition = nullptr;
2662
2663 if (!RO.Conditions.Architecture.empty())
2664 Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2665
2666 if (!RO.Conditions.Features.empty()) {
2667 llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2668 Condition =
2669 Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2670 }
2671 return Condition;
2672}
2673
2675 llvm::Function *Resolver,
2677 llvm::Function *FuncToReturn,
2678 bool SupportsIFunc) {
2679 if (SupportsIFunc) {
2680 Builder.CreateRet(FuncToReturn);
2681 return;
2682 }
2683
2685 llvm::make_pointer_range(Resolver->args()));
2686
2687 llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2688 Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2689
2690 if (Resolver->getReturnType()->isVoidTy())
2691 Builder.CreateRetVoid();
2692 else
2693 Builder.CreateRet(Result);
2694}
2695
2697 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2698
2699 llvm::Triple::ArchType ArchType =
2700 getContext().getTargetInfo().getTriple().getArch();
2701
2702 switch (ArchType) {
2703 case llvm::Triple::x86:
2704 case llvm::Triple::x86_64:
2705 EmitX86MultiVersionResolver(Resolver, Options);
2706 return;
2707 case llvm::Triple::aarch64:
2708 EmitAArch64MultiVersionResolver(Resolver, Options);
2709 return;
2710
2711 default:
2712 assert(false && "Only implemented for x86 and AArch64 targets");
2713 }
2714}
2715
2717 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2718 assert(!Options.empty() && "No multiversion resolver options found");
2719 assert(Options.back().Conditions.Features.size() == 0 &&
2720 "Default case must be last");
2721 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2722 assert(SupportsIFunc &&
2723 "Multiversion resolver requires target IFUNC support");
2724 bool AArch64CpuInitialized = false;
2725 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2726
2727 for (const MultiVersionResolverOption &RO : Options) {
2728 Builder.SetInsertPoint(CurBlock);
2729 llvm::Value *Condition = FormAArch64ResolverCondition(RO);
2730
2731 // The 'default' or 'all features enabled' case.
2732 if (!Condition) {
2733 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2734 SupportsIFunc);
2735 return;
2736 }
2737
2738 if (!AArch64CpuInitialized) {
2739 Builder.SetInsertPoint(CurBlock, CurBlock->begin());
2740 EmitAArch64CpuInit();
2741 AArch64CpuInitialized = true;
2742 Builder.SetInsertPoint(CurBlock);
2743 }
2744
2745 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2746 CGBuilderTy RetBuilder(*this, RetBlock);
2747 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2748 SupportsIFunc);
2749 CurBlock = createBasicBlock("resolver_else", Resolver);
2750 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2751 }
2752
2753 // If no default, emit an unreachable.
2754 Builder.SetInsertPoint(CurBlock);
2755 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2756 TrapCall->setDoesNotReturn();
2757 TrapCall->setDoesNotThrow();
2758 Builder.CreateUnreachable();
2759 Builder.ClearInsertionPoint();
2760}
2761
2763 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2764
2765 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2766
2767 // Main function's basic block.
2768 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2769 Builder.SetInsertPoint(CurBlock);
2770 EmitX86CpuInit();
2771
2772 for (const MultiVersionResolverOption &RO : Options) {
2773 Builder.SetInsertPoint(CurBlock);
2774 llvm::Value *Condition = FormX86ResolverCondition(RO);
2775
2776 // The 'default' or 'generic' case.
2777 if (!Condition) {
2778 assert(&RO == Options.end() - 1 &&
2779 "Default or Generic case must be last");
2780 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2781 SupportsIFunc);
2782 return;
2783 }
2784
2785 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2786 CGBuilderTy RetBuilder(*this, RetBlock);
2787 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2788 SupportsIFunc);
2789 CurBlock = createBasicBlock("resolver_else", Resolver);
2790 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2791 }
2792
2793 // If no generic/default, emit an unreachable.
2794 Builder.SetInsertPoint(CurBlock);
2795 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2796 TrapCall->setDoesNotReturn();
2797 TrapCall->setDoesNotThrow();
2798 Builder.CreateUnreachable();
2799 Builder.ClearInsertionPoint();
2800}
2801
2802// Loc - where the diagnostic will point, where in the source code this
2803// alignment has failed.
2804// SecondaryLoc - if present (will be present if sufficiently different from
2805// Loc), the diagnostic will additionally point a "Note:" to this location.
2806// It should be the location where the __attribute__((assume_aligned))
2807// was written e.g.
2809 llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2810 SourceLocation SecondaryLoc, llvm::Value *Alignment,
2811 llvm::Value *OffsetValue, llvm::Value *TheCheck,
2812 llvm::Instruction *Assumption) {
2813 assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2814 cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2815 llvm::Intrinsic::getDeclaration(
2816 Builder.GetInsertBlock()->getParent()->getParent(),
2817 llvm::Intrinsic::assume) &&
2818 "Assumption should be a call to llvm.assume().");
2819 assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2820 "Assumption should be the last instruction of the basic block, "
2821 "since the basic block is still being generated.");
2822
2823 if (!SanOpts.has(SanitizerKind::Alignment))
2824 return;
2825
2826 // Don't check pointers to volatile data. The behavior here is implementation-
2827 // defined.
2829 return;
2830
2831 // We need to temorairly remove the assumption so we can insert the
2832 // sanitizer check before it, else the check will be dropped by optimizations.
2833 Assumption->removeFromParent();
2834
2835 {
2836 SanitizerScope SanScope(this);
2837
2838 if (!OffsetValue)
2839 OffsetValue = Builder.getInt1(false); // no offset.
2840
2841 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2842 EmitCheckSourceLocation(SecondaryLoc),
2844 llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2845 EmitCheckValue(Alignment),
2846 EmitCheckValue(OffsetValue)};
2847 EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2848 SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2849 }
2850
2851 // We are now in the (new, empty) "cont" basic block.
2852 // Reintroduce the assumption.
2853 Builder.Insert(Assumption);
2854 // FIXME: Assumption still has it's original basic block as it's Parent.
2855}
2856
2858 if (CGDebugInfo *DI = getDebugInfo())
2859 return DI->SourceLocToDebugLoc(Location);
2860
2861 return llvm::DebugLoc();
2862}
2863
2864llvm::Value *
2865CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
2866 Stmt::Likelihood LH) {
2867 switch (LH) {
2868 case Stmt::LH_None:
2869 return Cond;
2870 case Stmt::LH_Likely:
2871 case Stmt::LH_Unlikely:
2872 // Don't generate llvm.expect on -O0 as the backend won't use it for
2873 // anything.
2874 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2875 return Cond;
2876 llvm::Type *CondTy = Cond->getType();
2877 assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
2878 llvm::Function *FnExpect =
2879 CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
2880 llvm::Value *ExpectedValueOfCond =
2881 llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
2882 return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
2883 Cond->getName() + ".expval");
2884 }
2885 llvm_unreachable("Unknown Likelihood");
2886}
2887
2888llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
2889 unsigned NumElementsDst,
2890 const llvm::Twine &Name) {
2891 auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
2892 unsigned NumElementsSrc = SrcTy->getNumElements();
2893 if (NumElementsSrc == NumElementsDst)
2894 return SrcVec;
2895
2896 std::vector<int> ShuffleMask(NumElementsDst, -1);
2897 for (unsigned MaskIdx = 0;
2898 MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
2899 ShuffleMask[MaskIdx] = MaskIdx;
2900
2901 return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
2902}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3217
This file provides some common utility functions for processing Lambda related AST Constructs.
static SVal getValue(SVal val, SValBuilder &svalBuilder)
Defines enum values for all the target-independent builtin functions.
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, llvm::Function *Resolver, CGBuilderTy &Builder, llvm::Function *FuncToReturn, bool SupportsIFunc)
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, Address dest, Address src, llvm::Value *sizeInChars)
emitNonZeroVLAInit - Emit the "zero" initialization of a variable-length array whose elements have a ...
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB)
static void TryMarkNoThrow(llvm::Function *F)
Tries to mark the given function nounwind based on the non-existence of any throwing calls within it.
static llvm::Constant * getPrologueSignature(CodeGenModule &CGM, const FunctionDecl *FD)
Return the UBSan prologue signature for FD if one is available.
static bool endsWithReturn(const Decl *F)
Determine whether the function F ends with a return stmt.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts, const LangOptions &LangOpts)
shouldEmitLifetimeMarkers - Decide whether we need emit the life-time markers.
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
unsigned Offset
Definition: Format.cpp:2776
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
Definition: Module.cpp:101
Defines the Objective-C statement AST node classes.
__device__ double
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD) const
Parses the target attributes passed in, and returns only the ones that are valid feature names.
CanQualType VoidPtrTy
Definition: ASTContext.h:1105
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:633
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const
Get a function type and produce the equivalent function type with the specified exception specificati...
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2706
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:744
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3031
QualType getElementType() const
Definition: Type.h:3052
Attr - This represents one attribute.
Definition: Attr.h:40
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3814
static bool isLogicalOp(Opcode Opc)
Definition: Expr.h:3946
const char * getRequiredFeatures(unsigned ID) const
Definition: Builtins.h:255
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2470
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2014
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition: DeclCXX.h:2129
QualType getThisType() const
Return the type of the this pointer.
Definition: DeclCXX.cpp:2502
bool isInstance() const
Definition: DeclCXX.h:2041
bool isLambda() const
Determine whether this class describes a lambda function object.
Definition: DeclCXX.h:1005
void getCaptureFields(llvm::DenseMap< const ValueDecl *, FieldDecl * > &Captures, FieldDecl *&ThisCapture) const
For a closure type, retrieve the mapping from captured variables and this to the non-static data memb...
Definition: DeclCXX.cpp:1599
LambdaCaptureDefault getLambdaCaptureDefault() const
Definition: DeclCXX.h:1046
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1187
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2812
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.cpp:1620
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:125
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
std::string SampleProfileFile
Name of the profile file to use with -fprofile-sample-use.
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
XRayInstrSet XRayInstrumentationBundle
Set of XRay instrumentation kinds to emit.
bool hasSanitizeCoverage() const
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
unsigned getInAllocaFieldIndex() const
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CharUnits getIndirectAlign() const
An aligned address.
Definition: Address.h:29
static Address invalid()
Definition: Address.h:49
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:81
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:65
llvm::Value * getPointer() const
Definition: Address.h:54
bool isValid() const
Definition: Address.h:50
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:60
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:798
static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, SourceLocation TemporaryLocation)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:837
This is an IRBuilder insertion helper that forwards to CodeGenFunction::InsertHelper,...
Definition: CGBuilder.h:26
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const override
This forwards to CodeGenFunction::InsertHelper.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:99
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:169
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:177
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:347
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:193
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:71
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:318
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:89
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args)=0
Emits a kernel launch stub.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:135
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
Definition: CGCXXABI.h:127
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF)=0
Emit the ABI-specific prolog for the function.
void buildThisParam(CodeGenFunction &CGF, FunctionArgList &Params)
Build a parameter variable suitable for 'this'.
Definition: CGCXXABI.cpp:121
virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params)=0
Insert any ABI-specific implicit parameters into the parameter list for a function.
All available information about a concrete callee.
Definition: CGCall.h:60
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:55
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
CanQualType getReturnType() const
unsigned getMaxVectorWidth() const
Return the maximum vector width in the arguments.
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
void emitEntryFunction(const FunctionDecl *FD, llvm::Function *Fn)
virtual void functionFinished(CodeGenFunction &CGF)
Cleans up references to the objects in finished function.
llvm::OpenMPIRBuilder & getOMPBuilder()
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D)
Emits OpenMP-specific function prolog.
CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitDestructorBody(FunctionArgList &Args)
void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount=0, Stmt::Likelihood LH=Stmt::LH_None, const Expr *CntrIdx=nullptr)
EmitBranchToCounterBlock - Emit a conditional branch to a new block that increments a profile counter...
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null. If the type contains...
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Emit a check that V is the address of storage of the appropriate size and alignment for an object of ...
llvm::Value * DecodeAddrUsedInPrologue(llvm::Value *F, llvm::Value *EncodedAddr)
Decode an address used in a function prologue, encoded by EncodeAddrForUseInPrologue.
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void unprotectFromPeepholes(PeepholeProtection protection)
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
static bool hasScalarEvaluationKind(QualType T)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
void EmitAArch64MultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
const LangOptions & getLangOpts() const
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
void EmitX86MultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
void EmitFunctionBody(const Stmt *Body)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
@ TCK_ConstructorCall
Checking the 'this' pointer for a constructor call.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
void setCurrentProfileCount(uint64_t Count)
Set the profiler's current count.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we're intending to store to the side, but which will prob...
const TargetInfo & getTarget() const
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
bool ShouldSkipSanitizerInstrumentation()
ShouldSkipSanitizerInstrumentation - Return true if the current function should not be instrumented w...
uint64_t getCurrentProfileCount()
Get the profiler's current count.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
Address EmitVAListRef(const Expr *E)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Address CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
Address NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
void EmitConstructorBody(FunctionArgList &Args)
void SetFastMathFlags(FPOptions FPFeatures)
Set the codegen fast-math flags.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::Type * ConvertType(QualType T)
CodeGenTypes & getTypes() const
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Address CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters.
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CGFunctionInfo * CurFnInfo
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs=std::nullopt)
EmitStmt - Emit the code for the statement.
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
bool checkIfFunctionMustProgress()
Returns true if a function must make progress, which means the mustprogress attribute can be added.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::BasicBlock * GetIndirectGotoBlock()
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
This class organizes the cross-function state that is used while generating LLVM code.
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::Constant * EmitAnnotationArgs(const AnnotateAttr *Attr)
Emit additional args of the annotation.
llvm::Module & getModule() const
void addCompilerUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.compiler.used metadata.
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
llvm::Constant * EmitAnnotationLineNo(SourceLocation L)
Emit the annotation line number.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
llvm::GlobalVariable * GetOrCreateRTTIProxyGlobalVariable(llvm::Constant *Addr)
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
bool imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc, StringRef Category=StringRef()) const
Imbue XRay attributes to a function, applying the always/never attribute lists in the process.
SanitizerMetadata * getSanitizerMetadata()
ProfileList::ExclusionType isFunctionBlockedFromProfileInstr(llvm::Function *Fn, SourceLocation Loc) const
ASTContext & getContext() const
llvm::SanitizerStatReport & getSanStats()
llvm::Constant * EmitAnnotationString(StringRef Str)
Emit an annotation string.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void GenKernelArgMetadata(llvm::Function *FN, const FunctionDecl *FD=nullptr, CodeGenFunction *CGF=nullptr)
OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument information in the program executab...
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
llvm::ConstantInt * CreateKCFITypeId(QualType T)
Generate a KCFI type identifier for T.
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
Definition: CGCall.cpp:1788
llvm::Constant * EmitAnnotationUnit(SourceLocation Loc)
Emit the annotation's translation unit.
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
Definition: CodeGenPGO.cpp:795
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:309
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM z...
llvm::Type * ConvertTypeForMem(QualType T, bool ForBitField=false)
ConvertTypeForMem - Convert type T into a llvm::Type.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:384
bool containsOnlyLifetimeMarkers(stable_iterator Old) const
Definition: CGCleanup.cpp:144
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:350
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:353
LValue - This represents an lvalue reference.
Definition: CGValue.h:171
static LValue MakeAddr(Address address, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:419
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:352
void InsertHelper(llvm::Instruction *I) const
Function called by the CodeGenFunction when an instruction is created.
Definition: CGLoopInfo.cpp:811
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:39
bool isScalar() const
Definition: CGValue.h:54
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:61
void disableSanitizerForInstruction(llvm::Instruction *I)
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition: TargetInfo.h:203
void Init(const Stmt *Body)
Clear the object and pre-process for the given statement, usually function body statement.
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1420
ConditionalOperator - The ?: ternary operator.
Definition: Expr.h:4152
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1238
ValueDecl * getDecl()
Definition: Expr.h:1306
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:83
T * getAttr() const
Definition: DeclBase.h:556
ASTContext & getASTContext() const LLVM_READONLY
Definition: DeclBase.cpp:428
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
Definition: DeclBase.cpp:1089
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:542
SourceLocation getLocation() const
Definition: DeclBase.h:432
bool hasAttr() const
Definition: DeclBase.h:560
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1542
bool isIgnored(unsigned DiagID, SourceLocation Loc) const
Determine whether the diagnostic is known to be ignored.
Definition: Diagnostic.h:911
This represents one expression.
Definition: Expr.h:110
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Returns the set of floating point options that apply to this expression.
Definition: Expr.cpp:3801
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3042
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3026
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:330
QualType getType() const
Definition: Expr.h:142
ExtVectorType - Extended vector type.
Definition: Type.h:3498
LangOptions::FPExceptionModeKind getExceptionMode() const
Definition: LangOptions.h:754
bool allowFPContractAcrossStatement() const
Definition: LangOptions.h:729
RoundingMode getRoundingMode() const
Definition: LangOptions.h:742
Represents a member of a struct/union/class.
Definition: Decl.h:2941
Represents a function declaration or definition.
Definition: Decl.h:1917
bool isMultiVersion() const
True if this function is considered a multiversioned function.
Definition: Decl.h:2517
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition: Decl.cpp:3134
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3429
bool UsesFPIntrin() const
Determine whether the function was declared in source context that requires constrained FP intrinsics...
Definition: Decl.h:2700
bool usesSEHTry() const
Indicates the function uses __try.
Definition: Decl.h:2398
QualType getReturnType() const
Definition: Decl.h:2636
ArrayRef< ParmVarDecl * > parameters() const
Definition: Decl.h:2582
FunctionDecl * getTemplateInstantiationPattern(bool ForDefinition=true) const
Retrieve the function declaration from which this function could be instantiated, if it is an instant...
Definition: Decl.cpp:3890
bool isMSVCRTEntryPoint() const
Determines whether this function is a MSVCRT user defined entry point.
Definition: Decl.cpp:3174
bool isInlineBuiltinDeclaration() const
Determine if this function provides an inline implementation of a builtin.
Definition: Decl.cpp:3295
bool hasImplicitReturnZero() const
Whether falling off this function implicitly returns null/zero.
Definition: Decl.h:2323
bool isMain() const
Determines whether this function is "main", which is the entry point into an executable program.
Definition: Decl.cpp:3166
bool isDefaulted() const
Whether this function is defaulted.
Definition: Decl.h:2280
OverloadedOperatorKind getOverloadedOperator() const
getOverloadedOperator - Which C++ overloaded operator this function represents, if any.
Definition: Decl.cpp:3762
Represents a prototype with parameter type info, e.g.
Definition: Type.h:4041
QualType desugar() const
Definition: Type.h:4470
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:105
const Decl * getDecl() const
Definition: GlobalDecl.h:103
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
@ Other
Other implicit parameter.
Definition: Decl.h:1682
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition: Decl.cpp:5074
Represents the declaration of a label.
Definition: Decl.h:494
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:82
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:388
FPExceptionModeKind
Possible floating point exception behavior.
Definition: LangOptions.h:270
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Definition: LangOptions.h:272
@ FPE_MayTrap
Transformations do not cause new exceptions but may hide some.
Definition: LangOptions.h:274
@ FPE_Strict
Strictly preserve the floating-point exception semantics.
Definition: LangOptions.h:276
RoundingMode getDefaultRoundingMode() const
Definition: LangOptions.h:642
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition: Decl.h:313
Represents a parameter to a function.
Definition: Decl.h:1722
ParsedAttr - Represents a syntactic attribute.
Definition: ParsedAttr.h:123
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2788
@ Forbid
Profiling is forbidden using the noprofile attribute.
Definition: ProfileList.h:37
@ Skip
Profiling is skipped using the skipprofile attribute.
Definition: ProfileList.h:35
@ Allow
Profiling is allowed.
Definition: ProfileList.h:33
A (possibly-)qualified type.
Definition: Type.h:736
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:6732
field_range fields() const
Definition: Decl.h:4225
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:4835
decl_type * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Definition: Redeclarable.h:204
Encodes a location in the source.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition: Stmt.h:72
StmtClass getStmtClass() const
Definition: Stmt.h:1177
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition: Stmt.cpp:325
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1120
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition: Stmt.h:1121
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1122
@ LH_Likely
Branch has the [[likely]] attribute.
Definition: Stmt.h:1124
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1190
bool supportsIFunc() const
Identify whether this target supports IFuncs.
Definition: TargetInfo.h:1397
virtual std::optional< std::pair< unsigned, unsigned > > getVScaleRange(const LangOptions &LangOpts) const
Returns target-specific min and max values VScale_Range.
Definition: TargetInfo.h:951
The base class of the type hierarchy.
Definition: Type.h:1566
bool isVoidType() const
Definition: Type.h:7218
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition: Type.cpp:2042
bool isPointerType() const
Definition: Type.h:6910
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:629
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2335
TypeClass getTypeClass() const
Definition: Type.h:1985
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:7424
bool isObjCRetainableType() const
Definition: Type.cpp:4463
std::optional< NullabilityKind > getNullability() const
Determine the nullability of the given type.
Definition: Type.cpp:4222
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2176
QualType getType() const
Definition: Decl.h:712
Represents a variable declaration or definition.
Definition: Decl.h:913
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3181
Expr * getSizeExpr() const
Definition: Type.h:3200
QualType getElementType() const
Definition: Type.h:3418
Defines the clang::TargetInfo interface.
#define UINT_MAX
Definition: limits.h:60
bool evaluateRequiredTargetFeatures(llvm::StringRef RequiredFeatures, const llvm::StringMap< bool > &TargetFeatureMap)
Returns true if the required target features of a builtin function are enabled.
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
constexpr XRayInstrMask Typed
Definition: XRayInstr.h:42
constexpr XRayInstrMask FunctionExit
Definition: XRayInstr.h:40
constexpr XRayInstrMask FunctionEntry
Definition: XRayInstr.h:39
constexpr XRayInstrMask Custom
Definition: XRayInstr.h:41
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1357
bool Call(InterpState &S, CodePtr &PC, const Function *Func)
Definition: Interp.h:1493
@ OpenCL
Definition: LangStandard.h:62
@ CPlusPlus
Definition: LangStandard.h:53
@ NonNull
Values of this type can never be null.
BinaryOperatorKind
@ OMF_initialize
@ C
Languages that the frontend can parse and compile.
bool isLambdaCallOperator(const CXXMethodDecl *MD)
Definition: ASTLambda.h:27
@ Result
The result type of a method or function.
@ LCD_None
Definition: Lambda.h:23
llvm::fp::ExceptionBehavior ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind)
@ EST_None
no exception specification
unsigned long uint64_t
YAML serialization mapping.
Definition: Dominators.h:30
This structure provides a set of types that are commonly used during IR emission.
llvm::PointerType * ConstGlobalsPtrTy
void* in the address space for constant globals
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:623
Contains information gathered from parsing the contents of TargetAttr.
Definition: TargetInfo.h:54
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:164
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:155
SanitizerMask Mask
Bitmask of enabled sanitizers.
Definition: Sanitizers.h:176
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition: Sanitizers.h:161
XRayInstrMask Mask
Definition: XRayInstr.h:65
bool has(XRayInstrMask K) const
Definition: XRayInstr.h:48