clang 20.0.0git
CGExprAgg.cpp
1//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGObjCRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "ConstantEmitter.h"
18#include "EHScopeStack.h"
19#include "TargetInfo.h"
20#include "clang/AST/ASTContext.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/DeclCXX.h"
23#include "clang/AST/DeclTemplate.h"
24#include "clang/AST/StmtVisitor.h"
25#include "llvm/IR/Constants.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/GlobalVariable.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/IntrinsicInst.h"
30#include "llvm/IR/Intrinsics.h"
31using namespace clang;
32using namespace CodeGen;
33
34//===----------------------------------------------------------------------===//
35// Aggregate Expression Emitter
36//===----------------------------------------------------------------------===//
37
38namespace llvm {
39extern cl::opt<bool> EnableSingleByteCoverage;
40} // namespace llvm
41
42namespace {
43class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
44 CodeGenFunction &CGF;
45 CGBuilderTy &Builder;
46 AggValueSlot Dest;
47 bool IsResultUnused;
48
49 AggValueSlot EnsureSlot(QualType T) {
50 if (!Dest.isIgnored()) return Dest;
51 return CGF.CreateAggTemp(T, "agg.tmp.ensured");
52 }
53 void EnsureDest(QualType T) {
54 if (!Dest.isIgnored()) return;
55 Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
56 }
57
58 // Calls `Fn` with a valid return value slot, potentially creating a temporary
59 // to do so. If a temporary is created, an appropriate copy into `Dest` will
60 // be emitted, as will lifetime markers.
61 //
62 // The given function should take a ReturnValueSlot, and return an RValue that
63 // points to said slot.
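 // Illustrative use, mirroring VisitCallExpr/VisitObjCMessageExpr below:
 //   withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
 //     return CGF.EmitCallExpr(E, Slot);
 //   });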
64 void withReturnValueSlot(const Expr *E,
65 llvm::function_ref<RValue(ReturnValueSlot)> Fn);
66
67public:
68 AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
69 : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
70 IsResultUnused(IsResultUnused) { }
71
72 //===--------------------------------------------------------------------===//
73 // Utilities
74 //===--------------------------------------------------------------------===//
75
76 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
77 /// represents a value lvalue, this method emits the address of the lvalue,
78 /// then loads the result into DestPtr.
79 void EmitAggLoadOfLValue(const Expr *E);
80
81 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
82 /// SrcIsRValue is true if source comes from an RValue.
83 void EmitFinalDestCopy(QualType type, const LValue &src,
84 CodeGenFunction::ExprValueKind SrcValueKind =
85 CodeGenFunction::EVK_NonRValue);
86 void EmitFinalDestCopy(QualType type, RValue src);
87 void EmitCopy(QualType type, const AggValueSlot &dest,
88 const AggValueSlot &src);
89
90 void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
91 Expr *ExprToVisit, ArrayRef<Expr *> Args,
92 Expr *ArrayFiller);
93
94 AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
95 if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
96 return AggValueSlot::NeedsGCBarriers;
97 return AggValueSlot::DoesNotNeedGCBarriers;
98 }
99
100 bool TypeRequiresGCollection(QualType T);
101
102 //===--------------------------------------------------------------------===//
103 // Visitor Methods
104 //===--------------------------------------------------------------------===//
105
106 void Visit(Expr *E) {
107 ApplyDebugLocation DL(CGF, E);
108 StmtVisitor<AggExprEmitter>::Visit(E);
109 }
110
111 void VisitStmt(Stmt *S) {
112 CGF.ErrorUnsupported(S, "aggregate expression");
113 }
114 void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
115 void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
116 Visit(GE->getResultExpr());
117 }
118 void VisitCoawaitExpr(CoawaitExpr *E) {
119 CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
120 }
121 void VisitCoyieldExpr(CoyieldExpr *E) {
122 CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
123 }
124 void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
125 void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
126 void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
127 return Visit(E->getReplacement());
128 }
129
130 void VisitConstantExpr(ConstantExpr *E) {
131 EnsureDest(E->getType());
132
133 if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
134 Address StoreDest = Dest.getAddress();
135 // The emitted value is guaranteed to have the same size as the
136 // destination but can have a different type. Just do a bitcast in this
137 // case to avoid incorrect GEPs.
138 if (Result->getType() != StoreDest.getType())
139 StoreDest = StoreDest.withElementType(Result->getType());
140
141 CGF.EmitAggregateStore(Result, StoreDest,
142 E->getType().isVolatileQualified());
143 return;
144 }
145 return Visit(E->getSubExpr());
146 }
147
148 // l-values.
149 void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
150 void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
151 void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
152 void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
153 void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
154 void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
155 EmitAggLoadOfLValue(E);
156 }
157 void VisitPredefinedExpr(const PredefinedExpr *E) {
158 EmitAggLoadOfLValue(E);
159 }
160
161 // Operators.
162 void VisitCastExpr(CastExpr *E);
163 void VisitCallExpr(const CallExpr *E);
164 void VisitStmtExpr(const StmtExpr *E);
165 void VisitBinaryOperator(const BinaryOperator *BO);
166 void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
167 void VisitBinAssign(const BinaryOperator *E);
168 void VisitBinComma(const BinaryOperator *E);
169 void VisitBinCmp(const BinaryOperator *E);
170 void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
171 Visit(E->getSemanticForm());
172 }
173
174 void VisitObjCMessageExpr(ObjCMessageExpr *E);
175 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
176 EmitAggLoadOfLValue(E);
177 }
178
179 void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
180 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
181 void VisitChooseExpr(const ChooseExpr *CE);
182 void VisitInitListExpr(InitListExpr *E);
183 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
184 FieldDecl *InitializedFieldInUnion,
185 Expr *ArrayFiller);
186 void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
187 llvm::Value *outerBegin = nullptr);
188 void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
189 void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
190 void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
191 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
192 Visit(DAE->getExpr());
193 }
194 void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
195 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
196 Visit(DIE->getExpr());
197 }
198 void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
199 void VisitCXXConstructExpr(const CXXConstructExpr *E);
200 void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
201 void VisitLambdaExpr(LambdaExpr *E);
202 void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
203 void VisitExprWithCleanups(ExprWithCleanups *E);
204 void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
205 void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
206 void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
207 void VisitOpaqueValueExpr(OpaqueValueExpr *E);
208
209 void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
210 if (E->isGLValue()) {
211 LValue LV = CGF.EmitPseudoObjectLValue(E);
212 return EmitFinalDestCopy(E->getType(), LV);
213 }
214
215 AggValueSlot Slot = EnsureSlot(E->getType());
216 bool NeedsDestruction =
217 !Slot.isExternallyDestructed() &&
218 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
219 if (NeedsDestruction)
220 Slot.setExternallyDestructed();
221 CGF.EmitPseudoObjectRValue(E, Slot);
222 if (NeedsDestruction)
223 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
224 E->getType());
225 }
226
227 void VisitVAArgExpr(VAArgExpr *E);
228 void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
229 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
230 Expr *ArrayFiller);
231
232 void EmitInitializationToLValue(Expr *E, LValue Address);
233 void EmitNullInitializationToLValue(LValue Address);
234 // case Expr::ChooseExprClass:
235 void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
236 void VisitAtomicExpr(AtomicExpr *E) {
237 RValue Res = CGF.EmitAtomicExpr(E);
238 EmitFinalDestCopy(E->getType(), Res);
239 }
240 void VisitPackIndexingExpr(PackIndexingExpr *E) {
241 Visit(E->getSelectedExpr());
242 }
243};
244} // end anonymous namespace.
245
246//===----------------------------------------------------------------------===//
247// Utilities
248//===----------------------------------------------------------------------===//
249
250/// EmitAggLoadOfLValue - Given an expression with aggregate type that
251/// represents a value lvalue, this method emits the address of the lvalue,
252/// then loads the result into DestPtr.
253void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
254 LValue LV = CGF.EmitLValue(E);
255
256 // If the type of the l-value is atomic, then do an atomic load.
257 if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
258 CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
259 return;
260 }
261
262 EmitFinalDestCopy(E->getType(), LV);
263}
264
265/// True if the given aggregate type requires special GC API calls.
266bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
267 // Only record types have members that might require garbage collection.
268 const RecordType *RecordTy = T->getAs<RecordType>();
269 if (!RecordTy) return false;
270
271 // Don't mess with non-trivial C++ types.
272 RecordDecl *Record = RecordTy->getDecl();
273 if (isa<CXXRecordDecl>(Record) &&
274 (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
275 !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
276 return false;
277
278 // Check whether the type has an object member.
279 return Record->hasObjectMember();
280}
281
282void AggExprEmitter::withReturnValueSlot(
283 const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
284 QualType RetTy = E->getType();
285 bool RequiresDestruction =
286 !Dest.isExternallyDestructed() &&
287 RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;
288
289 // If it makes no observable difference, save a memcpy + temporary.
290 //
291 // We need to always provide our own temporary if destruction is required.
292 // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
293 // its lifetime before we have the chance to emit a proper destructor call.
294 bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
295 (RequiresDestruction && Dest.isIgnored());
296
297 Address RetAddr = Address::invalid();
298 RawAddress RetAllocaAddr = RawAddress::invalid();
299
300 EHScopeStack::stable_iterator LifetimeEndBlock;
301 llvm::Value *LifetimeSizePtr = nullptr;
302 llvm::IntrinsicInst *LifetimeStartInst = nullptr;
303 if (!UseTemp) {
304 RetAddr = Dest.getAddress();
305 } else {
306 RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
307 llvm::TypeSize Size =
308 CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
309 LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
310 if (LifetimeSizePtr) {
311 LifetimeStartInst =
312 cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
313 assert(LifetimeStartInst->getIntrinsicID() ==
314 llvm::Intrinsic::lifetime_start &&
315 "Last insertion wasn't a lifetime.start?");
316
317 CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
318 NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
319 LifetimeEndBlock = CGF.EHStack.stable_begin();
320 }
321 }
322
323 RValue Src =
324 EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
325 Dest.isExternallyDestructed()));
326
327 if (!UseTemp)
328 return;
329
330 assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
331 Src.getAggregatePointer(E->getType(), CGF));
332 EmitFinalDestCopy(E->getType(), Src);
333
334 if (!RequiresDestruction && LifetimeStartInst) {
335 // If there's no dtor to run, the copy was the last use of our temporary.
336 // Since we're not guaranteed to be in an ExprWithCleanups, clean up
337 // eagerly.
338 CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
339 CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
340 }
341}
342
343/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
344void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
345 assert(src.isAggregate() && "value must be aggregate value!");
346 LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
347 EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
348}
349
350/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
351void AggExprEmitter::EmitFinalDestCopy(
352 QualType type, const LValue &src,
353 CodeGenFunction::ExprValueKind SrcValueKind) {
354 // If Dest is ignored, then we're evaluating an aggregate expression
355 // in a context that doesn't care about the result. Note that loads
356 // from volatile l-values force the existence of a non-ignored
357 // destination.
358 if (Dest.isIgnored())
359 return;
360
361 // Copy non-trivial C structs here.
362 LValue DstLV = CGF.MakeAddrLValue(
363 Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
364
365 if (SrcValueKind == CodeGenFunction::EVK_RValue) {
366 if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
367 if (Dest.isPotentiallyAliased())
368 CGF.callCStructMoveAssignmentOperator(DstLV, src);
369 else
370 CGF.callCStructMoveConstructor(DstLV, src);
371 return;
372 }
373 } else {
374 if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
375 if (Dest.isPotentiallyAliased())
376 CGF.callCStructCopyAssignmentOperator(DstLV, src);
377 else
378 CGF.callCStructCopyConstructor(DstLV, src);
379 return;
380 }
381 }
382
383 AggValueSlot srcAgg = AggValueSlot::forLValue(
384 src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,
385 AggValueSlot::MayOverlap);
386 EmitCopy(type, Dest, srcAgg);
387}
388
389/// Perform a copy from the source into the destination.
390///
391/// \param type - the type of the aggregate being copied; qualifiers are
392/// ignored
393void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
394 const AggValueSlot &src) {
395 if (dest.requiresGCollection()) {
396 CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
397 llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
398 CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
399 dest.getAddress(),
400 src.getAddress(),
401 size);
402 return;
403 }
404
405 // If the result of the assignment is used, copy the LHS there also.
406 // It's volatile if either side is. Use the minimum alignment of
407 // the two sides.
408 LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
409 LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
410 CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
411 dest.isVolatile() || src.isVolatile());
412}
413
414/// Emit the initializer for a std::initializer_list initialized with a
415/// real initializer list.
416void
417AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
418 // Emit an array containing the elements. The array is externally destructed
419 // if the std::initializer_list object is.
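 // Illustrative example (not part of the original source): for
 //   std::initializer_list<int> il = {1, 2, 3};
 // the subexpression is a backing array of three ints; the code below stores
 // its start pointer into the first field and either its length or its end
 // pointer into the second field, depending on the library's layout.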
420 ASTContext &Ctx = CGF.getContext();
421 LValue Array = CGF.EmitLValue(E->getSubExpr());
422 assert(Array.isSimple() && "initializer_list array not a simple lvalue");
423 Address ArrayPtr = Array.getAddress();
424
426 Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
427 assert(ArrayType && "std::initializer_list constructed from non-array");
428
429 RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
430 RecordDecl::field_iterator Field = Record->field_begin();
431 assert(Field != Record->field_end() &&
432 Ctx.hasSameType(Field->getType()->getPointeeType(),
433 ArrayType->getElementType()) &&
434 "Expected std::initializer_list first field to be const E *");
435
436 // Start pointer.
437 AggValueSlot Dest = EnsureSlot(E->getType());
438 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
439 LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
440 llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
441 CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
442 ++Field;
443 assert(Field != Record->field_end() &&
444 "Expected std::initializer_list to have two fields");
445
446 llvm::Value *Size = Builder.getInt(ArrayType->getSize());
447 LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
448 if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
449 // Length.
450 CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
451
452 } else {
453 // End pointer.
454 assert(Field->getType()->isPointerType() &&
455 Ctx.hasSameType(Field->getType()->getPointeeType(),
456 ArrayType->getElementType()) &&
457 "Expected std::initializer_list second field to be const E *");
458 llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
459 llvm::Value *IdxEnd[] = { Zero, Size };
460 llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
461 ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
462 "arrayend");
463 CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
464 }
465
466 assert(++Field == Record->field_end() &&
467 "Expected std::initializer_list to only have two fields");
468}
469
470/// Determine if E is a trivial array filler, that is, one that is
471/// equivalent to zero-initialization.
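///
/// Illustrative examples (not part of the original source): the implicit
/// filler in "int a[8] = {1};" is trivial (the remaining elements are
/// zero-initialized), while a filler that runs a user-provided default
/// constructor is not.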
472static bool isTrivialFiller(Expr *E) {
473 if (!E)
474 return true;
475
476 if (isa<ImplicitValueInitExpr>(E))
477 return true;
478
479 if (auto *ILE = dyn_cast<InitListExpr>(E)) {
480 if (ILE->getNumInits())
481 return false;
482 return isTrivialFiller(ILE->getArrayFiller());
483 }
484
485 if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
486 return Cons->getConstructor()->isDefaultConstructor() &&
487 Cons->getConstructor()->isTrivial();
488
489 // FIXME: Are there other cases where we can avoid emitting an initializer?
490 return false;
491}
492
493/// Emit initialization of an array from an initializer list. ExprToVisit must
494/// be either an InitListExpr or a CXXParenListInitExpr.
495void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
496 QualType ArrayQTy, Expr *ExprToVisit,
497 ArrayRef<Expr *> Args, Expr *ArrayFiller) {
498 uint64_t NumInitElements = Args.size();
499
500 uint64_t NumArrayElements = AType->getNumElements();
501 for (const auto *Init : Args) {
502 if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
503 NumInitElements += Embed->getDataElementCount() - 1;
504 if (NumInitElements > NumArrayElements) {
505 NumInitElements = NumArrayElements;
506 break;
507 }
508 }
509 }
510
511 assert(NumInitElements <= NumArrayElements);
512
513 QualType elementType =
514 CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
515 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
516 CharUnits elementAlign =
517 DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
518 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
519
520 // Consider initializing the array by copying from a global. For this to be
521 // more efficient than per-element initialization, the size of the elements
522 // with explicit initializers should be large enough.
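 // Illustrative example (not part of the original source):
 //   int a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
 // can be lowered to a memcpy from a private constant global rather than
 // eight individual element stores.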
523 if (NumInitElements * elementSize.getQuantity() > 16 &&
524 elementType.isTriviallyCopyableType(CGF.getContext())) {
525 CodeGen::CodeGenModule &CGM = CGF.CGM;
526 ConstantEmitter Emitter(CGF);
527 QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
528 CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
529 CGM.GetGlobalConstantAddressSpace());
530 LangAS AS = GVArrayQTy.getAddressSpace();
531 if (llvm::Constant *C =
532 Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
533 auto GV = new llvm::GlobalVariable(
534 CGM.getModule(), C->getType(),
535 /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
536 "constinit",
537 /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
538 CGM.getContext().getTargetAddressSpace(AS));
539 Emitter.finalize(GV);
540 CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
541 GV->setAlignment(Align.getAsAlign());
542 Address GVAddr(GV, GV->getValueType(), Align);
543 EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
544 return;
545 }
546 }
547
548 // Exception safety requires us to destroy all the
549 // already-constructed members if an initializer throws.
550 // For that, we'll need an EH cleanup.
551 QualType::DestructionKind dtorKind = elementType.isDestructedType();
552 Address endOfInit = Address::invalid();
553 CodeGenFunction::CleanupDeactivationScope deactivation(CGF);
554
555 llvm::Value *begin = DestPtr.emitRawPointer(CGF);
556 if (dtorKind) {
557 CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
558 // In principle we could tell the cleanup where we are more
559 // directly, but the control flow can get so varied here that it
560 // would actually be quite complex. Therefore we go through an
561 // alloca.
562 llvm::Instruction *dominatingIP =
563 Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
564 endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
565 "arrayinit.endOfInit");
566 Builder.CreateStore(begin, endOfInit);
567 CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
568 elementAlign,
569 CGF.getDestroyer(dtorKind));
570 cast<EHCleanupScope>(*CGF.EHStack.find(CGF.EHStack.stable_begin()))
571 .AddAuxAllocas(allocaTracker.Take());
572
574 {CGF.EHStack.stable_begin(), dominatingIP});
575 }
576
577 llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
578
579 auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
580 llvm::Value *element = begin;
581 if (ArrayIndex > 0) {
582 element = Builder.CreateInBoundsGEP(
583 llvmElementType, begin,
584 llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");
585
586 // Tell the cleanup that it needs to destroy up to this
587 // element. TODO: some of these stores can be trivially
588 // observed to be unnecessary.
589 if (endOfInit.isValid())
590 Builder.CreateStore(element, endOfInit);
591 }
592
593 LValue elementLV = CGF.MakeAddrLValue(
594 Address(element, llvmElementType, elementAlign), elementType);
595 EmitInitializationToLValue(Init, elementLV);
596 return true;
597 };
598
599 unsigned ArrayIndex = 0;
600 // Emit the explicit initializers.
601 for (uint64_t i = 0; i != NumInitElements; ++i) {
602 if (ArrayIndex >= NumInitElements)
603 break;
604 if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
605 EmbedS->doForEachDataElement(Emit, ArrayIndex);
606 } else {
607 Emit(Args[i], ArrayIndex);
608 ArrayIndex++;
609 }
610 }
611
612 // Check whether there's a non-trivial array-fill expression.
613 bool hasTrivialFiller = isTrivialFiller(ArrayFiller);
614
615 // Any remaining elements need to be zero-initialized, possibly
616 // using the filler expression. We can skip this if we're
617 // emitting to zeroed memory.
618 if (NumInitElements != NumArrayElements &&
619 !(Dest.isZeroed() && hasTrivialFiller &&
620 CGF.getTypes().isZeroInitializable(elementType))) {
621
622 // Use an actual loop. This is basically
623 // do { *array++ = filler; } while (array != end);
624
625 // Advance to the start of the rest of the array.
626 llvm::Value *element = begin;
627 if (NumInitElements) {
628 element = Builder.CreateInBoundsGEP(
629 llvmElementType, element,
630 llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
631 "arrayinit.start");
632 if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
633 }
634
635 // Compute the end of the array.
636 llvm::Value *end = Builder.CreateInBoundsGEP(
637 llvmElementType, begin,
638 llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");
639
640 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
641 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
642
643 // Jump into the body.
644 CGF.EmitBlock(bodyBB);
645 llvm::PHINode *currentElement =
646 Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
647 currentElement->addIncoming(element, entryBB);
648
649 // Emit the actual filler expression.
650 {
651 // C++1z [class.temporary]p5:
652 // when a default constructor is called to initialize an element of
653 // an array with no corresponding initializer [...] the destruction of
654 // every temporary created in a default argument is sequenced before
655 // the construction of the next array element, if any
656 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
657 LValue elementLV = CGF.MakeAddrLValue(
658 Address(currentElement, llvmElementType, elementAlign), elementType);
659 if (ArrayFiller)
660 EmitInitializationToLValue(ArrayFiller, elementLV);
661 else
662 EmitNullInitializationToLValue(elementLV);
663 }
664
665 // Move on to the next element.
666 llvm::Value *nextElement = Builder.CreateInBoundsGEP(
667 llvmElementType, currentElement, one, "arrayinit.next");
668
669 // Tell the EH cleanup that we finished with the last element.
670 if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
671
672 // Leave the loop if we're done.
673 llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
674 "arrayinit.done");
675 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
676 Builder.CreateCondBr(done, endBB, bodyBB);
677 currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
678
679 CGF.EmitBlock(endBB);
680 }
681}
682
683//===----------------------------------------------------------------------===//
684// Visitor Methods
685//===----------------------------------------------------------------------===//
686
687void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
688 Visit(E->getSubExpr());
689}
690
691void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
692 // If this is a unique OVE, just visit its source expression.
693 if (e->isUnique())
694 Visit(e->getSourceExpr());
695 else
696 EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
697}
698
699void
700AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
701 if (Dest.isPotentiallyAliased() &&
702 E->getType().isPODType(CGF.getContext())) {
703 // For a POD type, just emit a load of the lvalue + a copy, because our
704 // compound literal might alias the destination.
705 EmitAggLoadOfLValue(E);
706 return;
707 }
708
709 AggValueSlot Slot = EnsureSlot(E->getType());
710
711 // Block-scope compound literals are destroyed at the end of the enclosing
712 // scope in C.
713 bool Destruct =
714 !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
715 if (Destruct)
716 Slot.setExternallyDestructed();
717
718 CGF.EmitAggExpr(E->getInitializer(), Slot);
719
720 if (Destruct)
721 if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
722 CGF.pushLifetimeExtendedDestroy(
723 CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
724 CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
725}
726
727/// Attempt to look through various unimportant expressions to find a
728/// cast of the given kind.
729static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
730 op = op->IgnoreParenNoopCasts(ctx);
731 if (auto castE = dyn_cast<CastExpr>(op)) {
732 if (castE->getCastKind() == kind)
733 return castE->getSubExpr();
734 }
735 return nullptr;
736}
737
738void AggExprEmitter::VisitCastExpr(CastExpr *E) {
739 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
740 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
741 switch (E->getCastKind()) {
742 case CK_Dynamic: {
743 // FIXME: Can this actually happen? We have no test coverage for it.
744 assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
745 LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
746 CodeGenFunction::TCK_Load);
747 // FIXME: Do we also need to handle property references here?
748 if (LV.isSimple())
749 CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
750 else
751 CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
752
753 if (!Dest.isIgnored())
754 CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
755 break;
756 }
757
758 case CK_ToUnion: {
759 // Evaluate even if the destination is ignored.
760 if (Dest.isIgnored()) {
761 CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
762 /*ignoreResult=*/true);
763 break;
764 }
765
766 // GCC union extension
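 // Illustrative example (not part of the original source): "(union U)x"
 // initializes the member of U whose type matches x, so the destination is
 // reinterpreted as that member's type below.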
767 QualType Ty = E->getSubExpr()->getType();
768 Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
769 EmitInitializationToLValue(E->getSubExpr(),
770 CGF.MakeAddrLValue(CastPtr, Ty));
771 break;
772 }
773
774 case CK_LValueToRValueBitCast: {
775 if (Dest.isIgnored()) {
776 CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
777 /*ignoreResult=*/true);
778 break;
779 }
780
781 LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
782 Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
783 Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
784 llvm::Value *SizeVal = llvm::ConstantInt::get(
785 CGF.SizeTy,
786 CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
787 Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
788 break;
789 }
790
791 case CK_DerivedToBase:
792 case CK_BaseToDerived:
793 case CK_UncheckedDerivedToBase: {
794 llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
795 "should have been unpacked before we got here");
796 }
797
798 case CK_NonAtomicToAtomic:
799 case CK_AtomicToNonAtomic: {
800 bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
801
802 // Determine the atomic and value types.
803 QualType atomicType = E->getSubExpr()->getType();
804 QualType valueType = E->getType();
805 if (isToAtomic) std::swap(atomicType, valueType);
806
807 assert(atomicType->isAtomicType());
808 assert(CGF.getContext().hasSameUnqualifiedType(valueType,
809 atomicType->castAs<AtomicType>()->getValueType()));
810
811 // Just recurse normally if we're ignoring the result or the
812 // atomic type doesn't change representation.
813 if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
814 return Visit(E->getSubExpr());
815 }
816
817 CastKind peepholeTarget =
818 (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
819
820 // These two cases are reverses of each other; try to peephole them.
821 if (Expr *op =
822 findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
823 assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
824 E->getType()) &&
825 "peephole significantly changed types?");
826 return Visit(op);
827 }
828
829 // If we're converting an r-value of non-atomic type to an r-value
830 // of atomic type, just emit directly into the relevant sub-object.
831 if (isToAtomic) {
832 AggValueSlot valueDest = Dest;
833 if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
834 // Zero-initialize. (Strictly speaking, we only need to initialize
835 // the padding at the end, but this is simpler.)
836 if (!Dest.isZeroed())
837 CGF.EmitNullInitialization(Dest.getAddress(), atomicType);
838
839 // Build a GEP to refer to the subobject.
840 Address valueAddr =
841 CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
842 valueDest = AggValueSlot::forAddr(valueAddr,
843 valueDest.getQualifiers(),
844 valueDest.isExternallyDestructed(),
845 valueDest.requiresGCollection(),
846 valueDest.isPotentiallyAliased(),
847 AggValueSlot::DoesNotOverlap,
848 AggValueSlot::IsZeroed);
849 }
850
851 CGF.EmitAggExpr(E->getSubExpr(), valueDest);
852 return;
853 }
854
855 // Otherwise, we're converting an atomic type to a non-atomic type.
856 // Make an atomic temporary, emit into that, and then copy the value out.
857 AggValueSlot atomicSlot =
858 CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
859 CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
860
861 Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
862 RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
863 return EmitFinalDestCopy(valueType, rvalue);
864 }
865 case CK_AddressSpaceConversion:
866 return Visit(E->getSubExpr());
867
868 case CK_LValueToRValue:
869 // If we're loading from a volatile type, force the destination
870 // into existence.
871 if (E->getSubExpr()->getType().isVolatileQualified()) {
872 bool Destruct =
873 !Dest.isExternallyDestructed() &&
874 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
875 if (Destruct)
876 Dest.setExternallyDestructed();
877 EnsureDest(E->getType());
878 Visit(E->getSubExpr());
879
880 if (Destruct)
881 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
882 E->getType());
883
884 return;
885 }
886
887 [[fallthrough]];
888
889 case CK_HLSLArrayRValue:
890 Visit(E->getSubExpr());
891 break;
892
893 case CK_NoOp:
894 case CK_UserDefinedConversion:
895 case CK_ConstructorConversion:
896 assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
897 E->getType()) &&
898 "Implicit cast types must be compatible");
899 Visit(E->getSubExpr());
900 break;
901
902 case CK_LValueBitCast:
903 llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
904
905 case CK_Dependent:
906 case CK_BitCast:
907 case CK_ArrayToPointerDecay:
908 case CK_FunctionToPointerDecay:
909 case CK_NullToPointer:
910 case CK_NullToMemberPointer:
911 case CK_BaseToDerivedMemberPointer:
912 case CK_DerivedToBaseMemberPointer:
913 case CK_MemberPointerToBoolean:
914 case CK_ReinterpretMemberPointer:
915 case CK_IntegralToPointer:
916 case CK_PointerToIntegral:
917 case CK_PointerToBoolean:
918 case CK_ToVoid:
919 case CK_VectorSplat:
920 case CK_IntegralCast:
921 case CK_BooleanToSignedIntegral:
922 case CK_IntegralToBoolean:
923 case CK_IntegralToFloating:
924 case CK_FloatingToIntegral:
925 case CK_FloatingToBoolean:
926 case CK_FloatingCast:
927 case CK_CPointerToObjCPointerCast:
928 case CK_BlockPointerToObjCPointerCast:
929 case CK_AnyPointerToBlockPointerCast:
930 case CK_ObjCObjectLValueCast:
931 case CK_FloatingRealToComplex:
932 case CK_FloatingComplexToReal:
933 case CK_FloatingComplexToBoolean:
934 case CK_FloatingComplexCast:
935 case CK_FloatingComplexToIntegralComplex:
936 case CK_IntegralRealToComplex:
937 case CK_IntegralComplexToReal:
938 case CK_IntegralComplexToBoolean:
939 case CK_IntegralComplexCast:
940 case CK_IntegralComplexToFloatingComplex:
941 case CK_ARCProduceObject:
942 case CK_ARCConsumeObject:
943 case CK_ARCReclaimReturnedObject:
944 case CK_ARCExtendBlockObject:
945 case CK_CopyAndAutoreleaseBlockObject:
946 case CK_BuiltinFnToFnPtr:
947 case CK_ZeroToOCLOpaqueType:
948 case CK_MatrixCast:
949 case CK_HLSLVectorTruncation:
950
951 case CK_IntToOCLSampler:
952 case CK_FloatingToFixedPoint:
953 case CK_FixedPointToFloating:
954 case CK_FixedPointCast:
955 case CK_FixedPointToBoolean:
956 case CK_FixedPointToIntegral:
957 case CK_IntegralToFixedPoint:
958 llvm_unreachable("cast kind invalid for aggregate types");
959 }
960}
961
962void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
963 if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
964 EmitAggLoadOfLValue(E);
965 return;
966 }
967
968 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
969 return CGF.EmitCallExpr(E, Slot);
970 });
971}
972
973void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
974 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
975 return CGF.EmitObjCMessageExpr(E, Slot);
976 });
977}
978
979void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
980 CGF.EmitIgnoredExpr(E->getLHS());
981 Visit(E->getRHS());
982}
983
984void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
985 CodeGenFunction::StmtExprEvaluation eval(CGF);
986 CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
987}
988
989enum CompareKind {
990 CK_Less,
991 CK_Greater,
992 CK_Equal,
993};
994
995static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
996 const BinaryOperator *E, llvm::Value *LHS,
997 llvm::Value *RHS, CompareKind Kind,
998 const char *NameSuffix = "") {
999 QualType ArgTy = E->getLHS()->getType();
1000 if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
1001 ArgTy = CT->getElementType();
1002
1003 if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
1004 assert(Kind == CK_Equal &&
1005 "member pointers may only be compared for equality");
1007 CGF, LHS, RHS, MPT, /*IsInequality*/ false);
1008 }
1009
1010 // Compute the comparison instructions for the specified comparison kind.
1011 struct CmpInstInfo {
1012 const char *Name;
1013 llvm::CmpInst::Predicate FCmp;
1014 llvm::CmpInst::Predicate SCmp;
1015 llvm::CmpInst::Predicate UCmp;
1016 };
1017 CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
1018 using FI = llvm::FCmpInst;
1019 using II = llvm::ICmpInst;
1020 switch (Kind) {
1021 case CK_Less:
1022 return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
1023 case CK_Greater:
1024 return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
1025 case CK_Equal:
1026 return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
1027 }
1028 llvm_unreachable("Unrecognised CompareKind enum");
1029 }();
1030
1031 if (ArgTy->hasFloatingRepresentation())
1032 return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
1033 llvm::Twine(InstInfo.Name) + NameSuffix);
1034 if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
1035 auto Inst =
1036 ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
1037 return Builder.CreateICmp(Inst, LHS, RHS,
1038 llvm::Twine(InstInfo.Name) + NameSuffix);
1039 }
1040
1041 llvm_unreachable("unsupported aggregate binary expression should have "
1042 "already been handled");
1043}
1044
1045void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
1046 using llvm::BasicBlock;
1047 using llvm::PHINode;
1048 using llvm::Value;
1049 assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
1050 E->getRHS()->getType()));
1051 const ComparisonCategoryInfo &CmpInfo =
1052 CGF.getContext().CompCategories.getInfoForType(E->getType());
1053 assert(CmpInfo.Record->isTriviallyCopyable() &&
1054 "cannot copy non-trivially copyable aggregate");
1055
1056 QualType ArgTy = E->getLHS()->getType();
1057
1058 if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
1059 !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
1060 !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
1061 return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
1062 }
1063 bool IsComplex = ArgTy->isAnyComplexType();
1064
1065 // Evaluate the operands to the expression and extract their values.
1066 auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
1067 RValue RV = CGF.EmitAnyExpr(E);
1068 if (RV.isScalar())
1069 return {RV.getScalarVal(), nullptr};
1070 if (RV.isAggregate())
1071 return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
1072 assert(RV.isComplex());
1073 return RV.getComplexVal();
1074 };
1075 auto LHSValues = EmitOperand(E->getLHS()),
1076 RHSValues = EmitOperand(E->getRHS());
1077
1078 auto EmitCmp = [&](CompareKind K) {
1079 Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
1080 K, IsComplex ? ".r" : "");
1081 if (!IsComplex)
1082 return Cmp;
1083 assert(K == CompareKind::CK_Equal);
1084 Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
1085 RHSValues.second, K, ".i");
1086 return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
1087 };
1088 auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
1089 return Builder.getInt(VInfo->getIntValue());
1090 };
1091
1092 Value *Select;
1093 if (ArgTy->isNullPtrType()) {
1094 Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
1095 } else if (!CmpInfo.isPartial()) {
1096 Value *SelectOne =
1097 Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
1098 EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
1099 Select = Builder.CreateSelect(EmitCmp(CK_Equal),
1100 EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1101 SelectOne, "sel.eq");
1102 } else {
1103 Value *SelectEq = Builder.CreateSelect(
1104 EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1105 EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
1106 Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
1107 EmitCmpRes(CmpInfo.getGreater()),
1108 SelectEq, "sel.gt");
1109 Select = Builder.CreateSelect(
1110 EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
1111 }
1112 // Create the return value in the destination slot.
1113 EnsureDest(E->getType());
1114 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1115
1116 // Emit the address of the first (and only) field in the comparison category
1117 // type, and initialize it from the constant integer value selected above.
1118 LValue FieldLV = CGF.EmitLValueForFieldInitialization(
1119 DestLV, *CmpInfo.Record->field_begin());
1120 CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);
1121
1122 // All done! The result is in the Dest slot.
1123}
1124
1125void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
1126 if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
1127 VisitPointerToDataMemberBinaryOperator(E);
1128 else
1129 CGF.ErrorUnsupported(E, "aggregate binary expression");
1130}
1131
1132void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
1133 const BinaryOperator *E) {
1134 LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
1135 EmitFinalDestCopy(E->getType(), LV);
1136}
1137
1138/// Is the value of the given expression possibly a reference to or
1139/// into a __block variable?
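///
/// Illustrative example (not part of the original source): given
///   __block S s; s = f();
/// evaluating f() may trigger a block copy that moves 's' to the heap, so
/// the address of the LHS must be computed after the RHS (see VisitBinAssign).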
1140static bool isBlockVarRef(const Expr *E) {
1141 // Make sure we look through parens.
1142 E = E->IgnoreParens();
1143
1144 // Check for a direct reference to a __block variable.
1145 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
1146 const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
1147 return (var && var->hasAttr<BlocksAttr>());
1148 }
1149
1150 // More complicated stuff.
1151
1152 // Binary operators.
1153 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
1154 // For an assignment or pointer-to-member operation, just care
1155 // about the LHS.
1156 if (op->isAssignmentOp() || op->isPtrMemOp())
1157 return isBlockVarRef(op->getLHS());
1158
1159 // For a comma, just care about the RHS.
1160 if (op->getOpcode() == BO_Comma)
1161 return isBlockVarRef(op->getRHS());
1162
1163 // FIXME: pointer arithmetic?
1164 return false;
1165
1166 // Check both sides of a conditional operator.
1167 } else if (const AbstractConditionalOperator *op
1168 = dyn_cast<AbstractConditionalOperator>(E)) {
1169 return isBlockVarRef(op->getTrueExpr())
1170 || isBlockVarRef(op->getFalseExpr());
1171
1172 // OVEs are required to support BinaryConditionalOperators.
1173 } else if (const OpaqueValueExpr *op
1174 = dyn_cast<OpaqueValueExpr>(E)) {
1175 if (const Expr *src = op->getSourceExpr())
1176 return isBlockVarRef(src);
1177
1178 // Casts are necessary to get things like (*(int*)&var) = foo().
1179 // We don't really care about the kind of cast here, except
1180 // we don't want to look through l2r casts, because it's okay
1181 // to get the *value* in a __block variable.
1182 } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
1183 if (cast->getCastKind() == CK_LValueToRValue)
1184 return false;
1185 return isBlockVarRef(cast->getSubExpr());
1186
1187 // Handle unary operators. Again, just aggressively look through
1188 // it, ignoring the operation.
1189 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
1190 return isBlockVarRef(uop->getSubExpr());
1191
1192 // Look into the base of a field access.
1193 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
1194 return isBlockVarRef(mem->getBase());
1195
1196 // Look into the base of a subscript.
1197 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
1198 return isBlockVarRef(sub->getBase());
1199 }
1200
1201 return false;
1202}
1203
1204void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1205 // For an assignment to work, the value on the right has
1206 // to be compatible with the value on the left.
1207 assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
1208 E->getRHS()->getType())
1209 && "Invalid assignment");
1210
1211 // If the LHS might be a __block variable, and the RHS can
1212 // potentially cause a block copy, we need to evaluate the RHS first
1213 // so that the assignment goes the right place.
1214 // This is pretty semantically fragile.
1215 if (isBlockVarRef(E->getLHS()) &&
1216 E->getRHS()->HasSideEffects(CGF.getContext())) {
1217 // Ensure that we have a destination, and evaluate the RHS into that.
1218 EnsureDest(E->getRHS()->getType());
1219 Visit(E->getRHS());
1220
1221 // Now emit the LHS and copy into it.
1222 LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
1223
1224 // That copy is an atomic copy if the LHS is atomic.
1225 if (LHS.getType()->isAtomicType() ||
1226 CGF.LValueIsSuitableForInlineAtomic(LHS)) {
1227 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1228 return;
1229 }
1230
1231 EmitCopy(E->getLHS()->getType(),
1232 AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
1233 needsGC(E->getLHS()->getType()),
1234 AggValueSlot::IsAliased,
1235 AggValueSlot::MayOverlap),
1236 Dest);
1237 return;
1238 }
1239
1240 LValue LHS = CGF.EmitLValue(E->getLHS());
1241
1242 // If we have an atomic type, evaluate into the destination and then
1243 // do an atomic copy.
1244 if (LHS.getType()->isAtomicType() ||
1245 CGF.LValueIsSuitableForInlineAtomic(LHS)) {
1246 EnsureDest(E->getRHS()->getType());
1247 Visit(E->getRHS());
1248 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1249 return;
1250 }
1251
1252 // Codegen the RHS so that it stores directly into the LHS.
1253 AggValueSlot LHSSlot = AggValueSlot::forLValue(
1254 LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
1255 AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
1256 // A non-volatile aggregate destination might have a volatile member.
1257 if (!LHSSlot.isVolatile() &&
1258 CGF.hasVolatileMember(E->getLHS()->getType()))
1259 LHSSlot.setVolatile(true);
1260
1261 CGF.EmitAggExpr(E->getRHS(), LHSSlot);
1262
1263 // Copy into the destination if the assignment isn't ignored.
1264 EmitFinalDestCopy(E->getType(), LHS);
1265
1266 if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
1267 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
1268 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
1269 E->getType());
1270}
1271
1272void AggExprEmitter::
1273VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
1274 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1275 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1276 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
1277
1278 // Bind the common expression if necessary.
1279 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
1280
1281 CodeGenFunction::ConditionalEvaluation eval(CGF);
1282 CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
1283 CGF.getProfileCount(E));
1284
1285 // Save whether the destination's lifetime is externally managed.
1286 bool isExternallyDestructed = Dest.isExternallyDestructed();
1287 bool destructNonTrivialCStruct =
1288 !isExternallyDestructed &&
1289 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
1290 isExternallyDestructed |= destructNonTrivialCStruct;
1291 Dest.setExternallyDestructed(isExternallyDestructed);
1292
1293 eval.begin(CGF);
1294 CGF.EmitBlock(LHSBlock);
1295 if (llvm::EnableSingleByteCoverage)
1296 CGF.incrementProfileCounter(E->getTrueExpr());
1297 else
1298 CGF.incrementProfileCounter(E);
1299 Visit(E->getTrueExpr());
1300 eval.end(CGF);
1301
1302 assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
1303 CGF.Builder.CreateBr(ContBlock);
1304
1305 // If the result of an agg expression is unused, then the emission
1306 // of the LHS might need to create a destination slot. That's fine
1307 // with us, and we can safely emit the RHS into the same slot, but
1308 // we shouldn't claim that it's already being destructed.
1309 Dest.setExternallyDestructed(isExternallyDestructed);
1310
1311 eval.begin(CGF);
1312 CGF.EmitBlock(RHSBlock);
1313 if (llvm::EnableSingleByteCoverage)
1314 CGF.incrementProfileCounter(E->getFalseExpr());
1315 Visit(E->getFalseExpr());
1316 eval.end(CGF);
1317
1318 if (destructNonTrivialCStruct)
1319 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
1320 E->getType());
1321
1322 CGF.EmitBlock(ContBlock);
1323 if (llvm::EnableSingleByteCoverage)
1324 CGF.incrementProfileCounter(E);
1325}
1326
1327void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
1328 Visit(CE->getChosenSubExpr());
1329}
1330
1331void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1332 Address ArgValue = Address::invalid();
1333 CGF.EmitVAArg(VE, ArgValue, Dest);
1334
1335 // If EmitVAArg fails, emit an error.
1336 if (!ArgValue.isValid()) {
1337 CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
1338 return;
1339 }
1340}
1341
1342void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1343 // Ensure that we have a slot, but if we already do, remember
1344 // whether it was externally destructed.
1345 bool wasExternallyDestructed = Dest.isExternallyDestructed();
1346 EnsureDest(E->getType());
1347
1348 // We're going to push a destructor if there isn't already one.
1349 Dest.setExternallyDestructed();
1350
1351 Visit(E->getSubExpr());
1352
1353 // Push that destructor we promised.
1354 if (!wasExternallyDestructed)
1355 CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1356}
1357
1358void
1359AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1360 AggValueSlot Slot = EnsureSlot(E->getType());
1361 CGF.EmitCXXConstructExpr(E, Slot);
1362}
1363
1364void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1365 const CXXInheritedCtorInitExpr *E) {
1366 AggValueSlot Slot = EnsureSlot(E->getType());
1367 CGF.EmitInheritedCXXConstructorCall(
1368 E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
1369 E->inheritedFromVBase(), E);
1370}
1371
1372void
1373AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1374 AggValueSlot Slot = EnsureSlot(E->getType());
1375 LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1376
1377 // We'll need to enter cleanup scopes in case any of the element
1378 // initializers throws an exception or contains a branch out of the expression.
1379 CodeGenFunction::CleanupDeactivationScope scope(CGF);
1380
1381 CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1382 for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
1383 e = E->capture_init_end();
1384 i != e; ++i, ++CurField) {
1385 // Emit initialization
1386 LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1387 if (CurField->hasCapturedVLAType()) {
1388 CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1389 continue;
1390 }
1391
1392 EmitInitializationToLValue(*i, LV);
1393
1394 // Push a destructor if necessary.
1395 if (QualType::DestructionKind DtorKind =
1396 CurField->getType().isDestructedType()) {
1397 assert(LV.isSimple());
1398 if (DtorKind)
1399 CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
1400 CurField->getType(),
1401 CGF.getDestroyer(DtorKind), false);
1402 }
1403 }
1404}
1405
1406void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1407 CodeGenFunction::RunCleanupsScope cleanups(CGF);
1408 Visit(E->getSubExpr());
1409}
1410
1411void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1412 QualType T = E->getType();
1413 AggValueSlot Slot = EnsureSlot(T);
1414 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1415}
1416
1417void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1418 QualType T = E->getType();
1419 AggValueSlot Slot = EnsureSlot(T);
1420 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1421}
1422
1423/// Determine whether the given cast kind is known to always convert values
1424/// with all zero bits in their value representation to values with all zero
1425/// bits in their value representation.
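///
/// Illustrative note (not part of the original source): member-pointer casts
/// are excluded below because, for example, the Itanium C++ ABI represents a
/// null pointer-to-data-member as -1 rather than as all-zero bits.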
1426static bool castPreservesZero(const CastExpr *CE) {
1427 switch (CE->getCastKind()) {
1428 // No-ops.
1429 case CK_NoOp:
1430 case CK_UserDefinedConversion:
1431 case CK_ConstructorConversion:
1432 case CK_BitCast:
1433 case CK_ToUnion:
1434 case CK_ToVoid:
1435 // Conversions between (possibly-complex) integral, (possibly-complex)
1436 // floating-point, and bool.
1437 case CK_BooleanToSignedIntegral:
1438 case CK_FloatingCast:
1439 case CK_FloatingComplexCast:
1440 case CK_FloatingComplexToBoolean:
1441 case CK_FloatingComplexToIntegralComplex:
1442 case CK_FloatingComplexToReal:
1443 case CK_FloatingRealToComplex:
1444 case CK_FloatingToBoolean:
1445 case CK_FloatingToIntegral:
1446 case CK_IntegralCast:
1447 case CK_IntegralComplexCast:
1448 case CK_IntegralComplexToBoolean:
1449 case CK_IntegralComplexToFloatingComplex:
1450 case CK_IntegralComplexToReal:
1451 case CK_IntegralRealToComplex:
1452 case CK_IntegralToBoolean:
1453 case CK_IntegralToFloating:
1454 // Reinterpreting integers as pointers and vice versa.
1455 case CK_IntegralToPointer:
1456 case CK_PointerToIntegral:
1457 // Language extensions.
1458 case CK_VectorSplat:
1459 case CK_MatrixCast:
1460 case CK_NonAtomicToAtomic:
1461 case CK_AtomicToNonAtomic:
1462 case CK_HLSLVectorTruncation:
1463 return true;
1464
1465 case CK_BaseToDerivedMemberPointer:
1466 case CK_DerivedToBaseMemberPointer:
1467 case CK_MemberPointerToBoolean:
1468 case CK_NullToMemberPointer:
1469 case CK_ReinterpretMemberPointer:
1470 // FIXME: ABI-dependent.
1471 return false;
1472
1473 case CK_AnyPointerToBlockPointerCast:
1474 case CK_BlockPointerToObjCPointerCast:
1475 case CK_CPointerToObjCPointerCast:
1476 case CK_ObjCObjectLValueCast:
1477 case CK_IntToOCLSampler:
1478 case CK_ZeroToOCLOpaqueType:
1479 // FIXME: Check these.
1480 return false;
1481
1482 case CK_FixedPointCast:
1483 case CK_FixedPointToBoolean:
1484 case CK_FixedPointToFloating:
1485 case CK_FixedPointToIntegral:
1486 case CK_FloatingToFixedPoint:
1487 case CK_IntegralToFixedPoint:
1488 // FIXME: Do all fixed-point types represent zero as all 0 bits?
1489 return false;
1490
1491 case CK_AddressSpaceConversion:
1492 case CK_BaseToDerived:
1493 case CK_DerivedToBase:
1494 case CK_Dynamic:
1495 case CK_NullToPointer:
1496 case CK_PointerToBoolean:
1497 // FIXME: Preserves zeroes only if zero pointers and null pointers have the
1498 // same representation in all involved address spaces.
1499 return false;
1500
1501 case CK_ARCConsumeObject:
1502 case CK_ARCExtendBlockObject:
1503 case CK_ARCProduceObject:
1504 case CK_ARCReclaimReturnedObject:
1505 case CK_CopyAndAutoreleaseBlockObject:
1506 case CK_ArrayToPointerDecay:
1507 case CK_FunctionToPointerDecay:
1508 case CK_BuiltinFnToFnPtr:
1509 case CK_Dependent:
1510 case CK_LValueBitCast:
1511 case CK_LValueToRValue:
1512 case CK_LValueToRValueBitCast:
1513 case CK_UncheckedDerivedToBase:
1514 case CK_HLSLArrayRValue:
1515 return false;
1516 }
1517 llvm_unreachable("Unhandled clang::CastKind enum");
1518}
1519
1520/// isSimpleZero - If emitting this value will obviously just cause a store of
1521/// zero to memory, return true. This can return false if uncertain, so it just
1522/// handles simple cases.
1523static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1524 E = E->IgnoreParens();
1525 while (auto *CE = dyn_cast<CastExpr>(E)) {
1526 if (!castPreservesZero(CE))
1527 break;
1528 E = CE->getSubExpr()->IgnoreParens();
1529 }
1530
1531 // 0
1532 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1533 return IL->getValue() == 0;
1534 // +0.0
1535 if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1536 return FL->getValue().isPosZero();
1537 // int()
1538 if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1539 CGF.getTypes().isZeroInitializable(E->getType()))
1540 return true;
1541 // (int*)0 - Null pointer expressions.
1542 if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1543 return ICE->getCastKind() == CK_NullToPointer &&
1544 CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
1545 !E->HasSideEffects(CGF.getContext());
1546 // '\0'
1547 if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1548 return CL->getValue() == 0;
1549
1550 // Otherwise, hard case: conservatively return false.
1551 return false;
1552}
1553
1554
1555void
1556AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1557 QualType type = LV.getType();
1558 // FIXME: Ignore result?
1559 // FIXME: Are initializers affected by volatile?
1560 if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1561 // Storing "i32 0" to a zero'd memory location is a noop.
1562 return;
1563 } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1564 return EmitNullInitializationToLValue(LV);
1565 } else if (isa<NoInitExpr>(E)) {
1566 // Do nothing.
1567 return;
1568 } else if (type->isReferenceType()) {
1569 RValue RV = RValue::get(CGF.EmitReferenceBindingToExpr(E));
1570 return CGF.EmitStoreThroughLValue(RV, LV);
1571 }
1572
1573 switch (CGF.getEvaluationKind(type)) {
1574 case TEK_Complex:
1575 CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
1576 return;
1577 case TEK_Aggregate:
1578 CGF.EmitAggExpr(
1579 E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
1580 AggValueSlot::DoesNotNeedGCBarriers,
1581 AggValueSlot::IsNotAliased,
1582 AggValueSlot::MayOverlap, Dest.isZeroed()));
1583 return;
1584 case TEK_Scalar:
1585 if (LV.isSimple()) {
1586 CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
1587 } else {
1588 CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
1589 }
1590 return;
1591 }
1592 llvm_unreachable("bad evaluation kind");
1593}
1594
1595void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1596 QualType type = lv.getType();
1597
1598 // If the destination slot is already zeroed out before the aggregate is
1599 // copied into it, we don't have to emit any zeros here.
1600 if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1601 return;
1602
1603 if (CGF.hasScalarEvaluationKind(type)) {
1604 // For non-aggregates, we can store the appropriate null constant.
1605 llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1606 // Note that the following is not equivalent to
1607 // EmitStoreThroughBitfieldLValue for ARC types.
1608 if (lv.isBitField()) {
1609 CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
1610 } else {
1611 assert(lv.isSimple());
1612 CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1613 }
1614 } else {
1615 // There's a potential optimization opportunity in combining
1616 // memsets; that would be easy for arrays, but relatively
1617 // difficult for structures with the current code.
1618 CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
1619 }
1620}
1621
1622void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
1623 VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
1624 E->getInitializedFieldInUnion(),
1625 E->getArrayFiller());
1626}
1627
1628void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1629 if (E->hadArrayRangeDesignator())
1630 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1631
1632 if (E->isTransparent())
1633 return Visit(E->getInit(0));
1634
1635 VisitCXXParenListOrInitListExpr(
1636 E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
1637}
1638
1639void AggExprEmitter::VisitCXXParenListOrInitListExpr(
1640 Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
1641 FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
1642#if 0
1643 // FIXME: Assess perf here? Figure out what cases are worth optimizing here
1644 // (Length of globals? Chunks of zeroed-out space?).
1645 //
1646 // If we can, prefer a copy from a global; this is a lot less code for long
1647 // globals, and it's easier for the current optimizers to analyze.
1648 if (llvm::Constant *C =
1649 CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
1650 llvm::GlobalVariable* GV =
1651 new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1652 llvm::GlobalValue::InternalLinkage, C, "");
1653 EmitFinalDestCopy(ExprToVisit->getType(),
1654 CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
1655 return;
1656 }
1657#endif
1658
1659 AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());
1660
1661 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
1662
1663 // Handle initialization of an array.
1664 if (ExprToVisit->getType()->isConstantArrayType()) {
1665 auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1666 EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
1667 InitExprs, ArrayFiller);
1668 return;
1669 } else if (ExprToVisit->getType()->isVariableArrayType()) {
1670 // A variable array type that has an initializer can only do empty
1671 // initialization. And because this feature is not exposed as an extension
1672 // in C++, we can safely memset the array memory to zero.
1673 assert(InitExprs.size() == 0 &&
1674 "you can only use an empty initializer with VLAs");
1675 CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
1676 return;
1677 }
1678
1679 assert(ExprToVisit->getType()->isRecordType() &&
1680 "Only support structs/unions here!");
1681
1682 // Do struct initialization; this code just sets each individual member
1683 // to the appropriate value. This makes bitfield support automatic;
1684 // the disadvantage is that the generated code is more difficult for
1685 // the optimizer, especially with bitfields.
1686 unsigned NumInitElements = InitExprs.size();
1687 RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();
1688
1689 // We'll need to enter cleanup scopes in case any of the element
1690 // initializers throws an exception.
1692 CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);
1693
1694 unsigned curInitIndex = 0;
1695
1696 // Emit initialization of base classes.
1697 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1698 assert(NumInitElements >= CXXRD->getNumBases() &&
1699 "missing initializer for base class");
1700 for (auto &Base : CXXRD->bases()) {
1701 assert(!Base.isVirtual() && "should not see vbases here");
1702 auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1703 Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
1704 Dest.getAddress(), CXXRD, BaseRD,
1705 /*isBaseVirtual*/ false);
1706 AggValueSlot AggSlot = AggValueSlot::forAddr(
1707 V, Qualifiers(),
1708 AggValueSlot::IsDestructed,
1709 AggValueSlot::DoesNotNeedGCBarriers,
1710 AggValueSlot::IsNotAliased,
1711 CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1712 CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
1713
1714 if (QualType::DestructionKind dtorKind =
1715 Base.getType().isDestructedType())
1716 CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
1717 }
1718 }
1719
1720 // Prepare a 'this' for CXXDefaultInitExprs.
1721 CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1722
1723 if (record->isUnion()) {
1724 // Only initialize one field of a union. The field itself is
1725 // specified by the initializer list.
1726 if (!InitializedFieldInUnion) {
1727 // Empty union; we have nothing to do.
1728
1729#ifndef NDEBUG
1730 // Make sure that it's really an empty union and not a failure of
1731 // semantic analysis.
1732 for (const auto *Field : record->fields())
1733 assert(
1734 (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
1735 "Only unnamed bitfields or anonymous class allowed");
1736#endif
1737 return;
1738 }
1739
1740 // FIXME: volatility
1741 FieldDecl *Field = InitializedFieldInUnion;
1742
1743 LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1744 if (NumInitElements) {
1745 // Store the initializer into the field
1746 EmitInitializationToLValue(InitExprs[0], FieldLoc);
1747 } else {
1748 // Default-initialize to null.
1749 EmitNullInitializationToLValue(FieldLoc);
1750 }
1751
1752 return;
1753 }
1754
1755 // Here we iterate over the fields; this makes it simpler to both
1756 // default-initialize fields and skip over unnamed fields.
1757 for (const auto *field : record->fields()) {
1758 // We're done once we hit the flexible array member.
1759 if (field->getType()->isIncompleteArrayType())
1760 break;
1761
1762 // Always skip anonymous bitfields.
1763 if (field->isUnnamedBitField())
1764 continue;
1765
1766 // We're done if we reach the end of the explicit initializers, we
1767 // have a zeroed object, and the rest of the fields are
1768 // zero-initializable.
1769 if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1770 CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
1771 break;
1772
1773
1774 LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1775 // We never generate write-barriers for initialized fields.
1776 LV.setNonGC(true);
1777
1778 if (curInitIndex < NumInitElements) {
1779 // Store the initializer into the field.
1780 EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
1781 } else {
1782 // We're out of initializers; default-initialize to null
1783 EmitNullInitializationToLValue(LV);
1784 }
1785
1786 // Push a destructor if necessary.
1787 // FIXME: if we have an array of structures, all explicitly
1788 // initialized, we can end up pushing a linear number of cleanups.
1789 if (QualType::DestructionKind dtorKind
1790 = field->getType().isDestructedType()) {
1791 assert(LV.isSimple());
1792 if (dtorKind) {
1793 CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
1794 field->getType(),
1795 CGF.getDestroyer(dtorKind), false);
1796 }
1797 }
1798 }
1799}
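
As an editorial aside (not part of the file), the two source forms that reach VisitCXXParenListOrInitListExpr look like this; the parenthesized form assumes a C++20 compiler. Both are lowered field by field, and fields left without an explicit initializer are null-initialized.

// Editorial illustration only; not part of CGExprAgg.cpp.
// Compile with -std=c++20 for the parenthesized aggregate form.
struct P {
  int x;
  int y;
  int z;
};

int main() {
  P a{1, 2};  // InitListExpr: x and y are stored, z is null-initialized
  P b(3, 4);  // CXXParenListInitExpr: takes the same field-by-field path
  return a.x + b.y + a.z;
}
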
1800
1801void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1802 llvm::Value *outerBegin) {
1803 // Emit the common subexpression.
1804 CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1805
1806 Address destPtr = EnsureSlot(E->getType()).getAddress();
1807 uint64_t numElements = E->getArraySize().getZExtValue();
1808
1809 if (!numElements)
1810 return;
1811
1812 // destPtr is an array*. Construct an elementType* by drilling down a level.
1813 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1814 llvm::Value *indices[] = {zero, zero};
1815 llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
1816 destPtr.emitRawPointer(CGF),
1817 indices, "arrayinit.begin");
1818
1819 // Prepare to special-case multidimensional array initialization: we avoid
1820 // emitting multiple destructor loops in that case.
1821 if (!outerBegin)
1822 outerBegin = begin;
1823 ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
1824
1825 QualType elementType =
1826 CGF.getContext().getAsArrayType(E->getType())->getElementType();
1827 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1828 CharUnits elementAlign =
1829 destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1830 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
1831
1832 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1833 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1834
1835 // Jump into the body.
1836 CGF.EmitBlock(bodyBB);
1837 llvm::PHINode *index =
1838 Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1839 index->addIncoming(zero, entryBB);
1840 llvm::Value *element =
1841 Builder.CreateInBoundsGEP(llvmElementType, begin, index);
1842
1843 // Prepare for a cleanup.
1844 QualType::DestructionKind dtorKind = elementType.isDestructedType();
1845 EHScopeStack::stable_iterator cleanup;
1846 if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1847 if (outerBegin->getType() != element->getType())
1848 outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
1849 CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
1850 elementAlign,
1851 CGF.getDestroyer(dtorKind));
1852 cleanup = CGF.EHStack.stable_begin();
1853 } else {
1854 dtorKind = QualType::DK_none;
1855 }
1856
1857 // Emit the actual filler expression.
1858 {
1859 // Temporaries created in an array initialization loop are destroyed
1860 // at the end of each iteration.
1861 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
1862 CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
1863 LValue elementLV = CGF.MakeAddrLValue(
1864 Address(element, llvmElementType, elementAlign), elementType);
1865
1866 if (InnerLoop) {
1867 // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
1868 auto elementSlot = AggValueSlot::forLValue(
1869 elementLV, AggValueSlot::IsDestructed,
1870 AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
1871 AggValueSlot::DoesNotOverlap);
1872 AggExprEmitter(CGF, elementSlot, false)
1873 .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
1874 } else
1875 EmitInitializationToLValue(E->getSubExpr(), elementLV);
1876 }
1877
1878 // Move on to the next element.
1879 llvm::Value *nextIndex = Builder.CreateNUWAdd(
1880 index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
1881 index->addIncoming(nextIndex, Builder.GetInsertBlock());
1882
1883 // Leave the loop if we're done.
1884 llvm::Value *done = Builder.CreateICmpEQ(
1885 nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
1886 "arrayinit.done");
1887 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
1888 Builder.CreateCondBr(done, endBB, bodyBB);
1889
1890 CGF.EmitBlock(endBB);
1891
1892 // Leave the partial-array cleanup if we entered one.
1893 if (dtorKind)
1894 CGF.DeactivateCleanupBlock(cleanup, index);
1895}
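
For orientation, an editorial example (not from this file) of source that produces an ArrayInitLoopExpr: capturing an array by copy in a lambda. The copy is emitted as the index/PHI loop built above, with a partial-array EH cleanup so that already-constructed elements are destroyed if a later copy constructor throws.

// Editorial illustration only; not part of CGExprAgg.cpp.
#include <string>

int main() {
  std::string words[3] = {"a", "bb", "ccc"};
  // The by-copy capture of 'words' is an ArrayInitLoopExpr: each element is
  // copy-constructed in a loop over arrayinit.index.
  auto f = [words]() { return words[2].size(); };
  return static_cast<int>(f());
}
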
1896
1897void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1898 AggValueSlot Dest = EnsureSlot(E->getType());
1899
1900 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1901 EmitInitializationToLValue(E->getBase(), DestLV);
1902 VisitInitListExpr(E->getUpdater());
1903}
1904
1905//===----------------------------------------------------------------------===//
1906// Entry Points into this File
1907//===----------------------------------------------------------------------===//
1908
1909/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1910/// non-zero bytes that will be stored when outputting the initializer for the
1911/// specified initializer expression.
1912static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1913 if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1914 E = MTE->getSubExpr();
1915 E = E->IgnoreParenNoopCasts(CGF.getContext());
1916
1917 // 0 and 0.0 won't require any non-zero stores!
1918 if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1919
1920 // If this is an initlist expr, sum up the sizes of the (present)
1921 // elements. If this is something weird, assume the whole thing is non-zero.
1922 const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1923 while (ILE && ILE->isTransparent())
1924 ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
1925 if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1926 return CGF.getContext().getTypeSizeInChars(E->getType());
1927
1928 // InitListExprs for structs have to be handled carefully. If there are
1929 // reference members, we need to consider the size of the reference, not the
1930 // referencee. InitListExprs for unions and arrays can't have references.
1931 if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1932 if (!RT->isUnionType()) {
1933 RecordDecl *SD = RT->getDecl();
1934 CharUnits NumNonZeroBytes = CharUnits::Zero();
1935
1936 unsigned ILEElement = 0;
1937 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
1938 while (ILEElement != CXXRD->getNumBases())
1939 NumNonZeroBytes +=
1940 GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
1941 for (const auto *Field : SD->fields()) {
1942 // We're done once we hit the flexible array member or run out of
1943 // InitListExpr elements.
1944 if (Field->getType()->isIncompleteArrayType() ||
1945 ILEElement == ILE->getNumInits())
1946 break;
1947 if (Field->isUnnamedBitField())
1948 continue;
1949
1950 const Expr *E = ILE->getInit(ILEElement++);
1951
1952 // Reference values are always non-null and have the width of a pointer.
1953 if (Field->getType()->isReferenceType())
1954 NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1955 CGF.getTarget().getPointerWidth(LangAS::Default));
1956 else
1957 NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1958 }
1959
1960 return NumNonZeroBytes;
1961 }
1962 }
1963
1964 // FIXME: This overestimates the number of non-zero bytes for bit-fields.
1965 CharUnits NumNonZeroBytes = CharUnits::Zero();
1966 for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1967 NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
1968 return NumNonZeroBytes;
1969}
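
A brief editorial example (not part of the file) of the reference-member caveat handled above: initializing a reference member stores an address, so the estimate charges pointer-width non-zero bytes for it rather than the size of the referenced object. The byte counts in the comments assume a 64-bit target.

// Editorial illustration only; not part of CGExprAgg.cpp.
struct R {
  int &ref;    // storing this reference writes a non-zero, pointer-sized value
  int pad[8];  // these members are zero in 's' below
};

int global = 1;

int main() {
  R s = {global};  // estimate: ~8 non-zero bytes out of a 40-byte object
  return s.ref;
}
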
1970
1971/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1972/// zeros in it, emit a memset and avoid storing the individual zeros.
1973///
1974static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1975 CodeGenFunction &CGF) {
1976 // If the slot is already known to be zeroed, nothing to do. Don't mess with
1977 // volatile stores.
1978 if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
1979 return;
1980
1981 // C++ objects with a user-declared constructor don't need zero'ing.
1982 if (CGF.getLangOpts().CPlusPlus)
1983 if (const RecordType *RT = CGF.getContext()
1984 .getBaseElementType(E->getType())->getAs<RecordType>()) {
1985 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1986 if (RD->hasUserDeclaredConstructor())
1987 return;
1988 }
1989
1990 // If the type is 16 bytes or smaller, prefer individual stores over memset.
1991 CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
1992 if (Size <= CharUnits::fromQuantity(16))
1993 return;
1994
1995 // Check to see if over 3/4 of the initializer are known to be zero. If so,
1996 // we prefer to emit memset + individual stores for the rest.
1997 CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
1998 if (NumNonZeroBytes*4 > Size)
1999 return;
2000
2001 // Okay, it seems like a good idea to use an initial memset, emit the call.
2002 llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
2003
2004 Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
2005 CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
2006
2007 // Tell the AggExprEmitter that the slot is known zero.
2008 Slot.setZeroed();
2009}
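
An editorial example (not from the file) of an initializer this heuristic targets: the object is larger than 16 bytes and more than three quarters of its bytes are zero, so the slot is cleared with a single memset and only the non-zero member is stored afterwards.

// Editorial illustration only; not part of CGExprAgg.cpp.
struct Mostly {
  int first;
  int rest[15];  // 60 bytes of zeros
};

int main() {
  Mostly m = {42};  // 64-byte object, 4 non-zero bytes: memset + one store
  return m.first;
}
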
2010
2011
2012
2013
2014/// EmitAggExpr - Emit the computation of the specified expression of aggregate
2015/// type. The result is computed into DestPtr. Note that if DestPtr is null,
2016/// the value of the aggregate expression is not needed. If VolatileDest is
2017/// true, DestPtr cannot be 0.
2018void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
2019 assert(E && hasAggregateEvaluationKind(E->getType()) &&
2020 "Invalid aggregate expression to emit");
2021 assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
2022 "slot has bits but no address");
2023
2024 // Optimize the slot if possible.
2025 CheckAggExprForMemSetUse(Slot, E, *this);
2026
2027 AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
2028}
2029
2030LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
2031 assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
2032 Address Temp = CreateMemTemp(E->getType());
2033 LValue LV = MakeAddrLValue(Temp, E->getType());
2034 EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
2035 AggValueSlot::DoesNotNeedGCBarriers,
2036 AggValueSlot::IsNotAliased,
2037 AggValueSlot::DoesNotOverlap));
2038 return LV;
2039}
2040
2041void CodeGenFunction::EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest,
2042 const LValue &Src,
2043 ExprValueKind SrcKind) {
2044 return AggExprEmitter(*this, Dest, Dest.isIgnored())
2045 .EmitFinalDestCopy(Type, Src, SrcKind);
2046}
2047
2048AggValueSlot::Overlap_t
2049CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
2050 if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2051 return AggValueSlot::DoesNotOverlap;
2052
2053 // If the field lies entirely within the enclosing class's nvsize, its tail
2054 // padding cannot overlap any already-initialized object. (The only subobjects
2055 // with greater addresses that might already be initialized are vbases.)
2056 const RecordDecl *ClassRD = FD->getParent();
2057 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
2058 if (Layout.getFieldOffset(FD->getFieldIndex()) +
2059 getContext().getTypeSize(FD->getType()) <=
2060 (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
2061 return AggValueSlot::DoesNotOverlap;
2062
2063 // The tail padding may contain values we need to preserve.
2064 return AggValueSlot::MayOverlap;
2065}
2066
2067AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
2068 const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
2069 // If the most-derived object is a field declared with [[no_unique_address]],
2070 // the tail padding of any virtual base could be reused for other subobjects
2071 // of that field's class.
2072 if (IsVirtual)
2073 return AggValueSlot::MayOverlap;
2074
2075 // If the base class is laid out entirely within the nvsize of the derived
2076 // class, its tail padding cannot yet be initialized, so we can issue
2077 // stores at the full width of the base class.
2078 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2079 if (Layout.getBaseClassOffset(BaseRD) +
2080 getContext().getASTRecordLayout(BaseRD).getSize() <=
2081 Layout.getNonVirtualSize())
2082 return AggValueSlot::DoesNotOverlap;
2083
2084 // The tail padding may contain values we need to preserve.
2085 return AggValueSlot::MayOverlap;
2086}
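
An editorial sketch (not part of the file) of why these overlap queries matter; it assumes a C++20 compiler and an ABI that reuses tail padding for potentially-overlapping subobjects. When a field is declared [[no_unique_address]], a later member may live in that field's tail padding, so its initialization must not write the field's full storage size.

// Editorial illustration only; not part of CGExprAgg.cpp.
struct Inner {
  long long l;
  char c;  // typically followed by 7 bytes of tail padding
};

struct Outer {
  [[no_unique_address]] Inner i;  // potentially-overlapping subobject
  char packed[7];                 // may be placed in i's tail padding
};

int main() { return static_cast<int>(sizeof(Outer)); }
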
2087
2088void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
2089 AggValueSlot::Overlap_t MayOverlap,
2090 bool isVolatile) {
2091 assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
2092
2093 Address DestPtr = Dest.getAddress();
2094 Address SrcPtr = Src.getAddress();
2095
2096 if (getLangOpts().CPlusPlus) {
2097 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2098 CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
2099 assert((Record->hasTrivialCopyConstructor() ||
2100 Record->hasTrivialCopyAssignment() ||
2101 Record->hasTrivialMoveConstructor() ||
2102 Record->hasTrivialMoveAssignment() ||
2103 Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
2104 "Trying to aggregate-copy a type without a trivial copy/move "
2105 "constructor or assignment operator");
2106 // Ignore empty classes in C++.
2107 if (Record->isEmpty())
2108 return;
2109 }
2110 }
2111
2112 if (getLangOpts().CUDAIsDevice) {
2113 if (Ty->isCUDADeviceBuiltinSurfaceType()) {
2114 if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
2115 Src))
2116 return;
2117 } else if (Ty->isCUDADeviceBuiltinTextureType()) {
2118 if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
2119 Src))
2120 return;
2121 }
2122 }
2123
2124 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
2125 // C99 6.5.16.1p3, which states "If the value being stored in an object is
2126 // read from another object that overlaps in any way the storage of the first
2127 // object, then the overlap shall be exact and the two objects shall have
2128 // qualified or unqualified versions of a compatible type."
2129 //
2130 // memcpy is not defined if the source and destination pointers are exactly
2131 // equal, but other compilers do this optimization, and almost every memcpy
2132 // implementation handles this case safely. If there is a libc that does not
2133 // safely handle this, we can add a target hook.
2134
2135 // Get data size info for this aggregate. Don't copy the tail padding if this
2136 // might be a potentially-overlapping subobject, since the tail padding might
2137 // be occupied by a different object. Otherwise, copying it is fine.
2138 TypeInfoChars TypeInfo;
2139 if (MayOverlap)
2140 TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
2141 else
2142 TypeInfo = getContext().getTypeInfoInChars(Ty);
2143
2144 llvm::Value *SizeVal = nullptr;
2145 if (TypeInfo.Width.isZero()) {
2146 // But note that getTypeInfo returns 0 for a VLA.
2147 if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
2148 getContext().getAsArrayType(Ty))) {
2149 QualType BaseEltTy;
2150 SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
2151 TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
2152 assert(!TypeInfo.Width.isZero());
2153 SizeVal = Builder.CreateNUWMul(
2154 SizeVal,
2155 llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
2156 }
2157 }
2158 if (!SizeVal) {
2159 SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
2160 }
2161
2162 // FIXME: If we have a volatile struct, the optimizer can remove what might
2163 // appear to be `extra' memory ops:
2164 //
2165 // volatile struct { int i; } a, b;
2166 //
2167 // int main() {
2168 // a = b;
2169 // a = b;
2170 // }
2171 //
2172 // we need to use a different call here. We use isVolatile to indicate when
2173 // either the source or the destination is volatile.
2174
2175 DestPtr = DestPtr.withElementType(Int8Ty);
2176 SrcPtr = SrcPtr.withElementType(Int8Ty);
2177
2178 // Don't do any of the memmove_collectable tests if GC isn't set.
2179 if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
2180 // fall through
2181 } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
2182 RecordDecl *Record = RecordTy->getDecl();
2183 if (Record->hasObjectMember()) {
2184 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2185 SizeVal);
2186 return;
2187 }
2188 } else if (Ty->isArrayType()) {
2189 QualType BaseType = getContext().getBaseElementType(Ty);
2190 if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
2191 if (RecordTy->getDecl()->hasObjectMember()) {
2192 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2193 SizeVal);
2194 return;
2195 }
2196 }
2197 }
2198
2199 auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2200
2201 // Determine the metadata to describe the position of any padding in this
2202 // memcpy, as well as the TBAA tags for the members of the struct, in case
2203 // the optimizer wishes to expand it in to scalar memory operations.
2204 if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2205 Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2206
2207 if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2208 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
2209 Dest.getTBAAInfo(), Src.getTBAAInfo());
2210 CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2211 }
2212}
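
To close, an editorial example (not from the file) of the copies EmitAggregateCopy emits: assigning one trivially copyable aggregate to another becomes a single llvm.memcpy, annotated with tbaa.struct metadata, covering the full 16 bytes here because the destination is not a potentially-overlapping subobject.

// Editorial illustration only; not part of CGExprAgg.cpp.
struct Pair {
  int a;
  double d;
};

int main() {
  Pair src = {1, 2.0};
  Pair dst = {0, 0.0};
  dst = src;  // lowered through EmitAggregateCopy to an llvm.memcpy
  return dst.a;
}
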