1//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGObjCRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "ConstantEmitter.h"
18#include "EHScopeStack.h"
19#include "TargetInfo.h"
20#include "clang/AST/ASTContext.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/DeclCXX.h"
23#include "clang/AST/DeclTemplate.h"
24#include "clang/AST/StmtVisitor.h"
25#include "llvm/IR/Constants.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/GlobalVariable.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/IntrinsicInst.h"
30#include "llvm/IR/Intrinsics.h"
31using namespace clang;
32using namespace CodeGen;
33
34//===----------------------------------------------------------------------===//
35// Aggregate Expression Emitter
36//===----------------------------------------------------------------------===//
37
38namespace llvm {
39extern cl::opt<bool> EnableSingleByteCoverage;
40} // namespace llvm
41
42namespace {
43class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
44 CodeGenFunction &CGF;
45 CGBuilderTy &Builder;
46 AggValueSlot Dest;
47 bool IsResultUnused;
48
49 AggValueSlot EnsureSlot(QualType T) {
50 if (!Dest.isIgnored()) return Dest;
51 return CGF.CreateAggTemp(T, "agg.tmp.ensured");
52 }
53 void EnsureDest(QualType T) {
54 if (!Dest.isIgnored()) return;
55 Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
56 }
57
58 // Calls `Fn` with a valid return value slot, potentially creating a temporary
59 // to do so. If a temporary is created, an appropriate copy into `Dest` will
60 // be emitted, as will lifetime markers.
61 //
62 // The given function should take a ReturnValueSlot, and return an RValue that
63 // points to said slot.
64 void withReturnValueSlot(const Expr *E,
65 llvm::function_ref<RValue(ReturnValueSlot)> Fn);
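 // A typical caller wraps call emission in a lambda, e.g. (sketch; compare
 // VisitCallExpr and VisitObjCMessageExpr later in this file):
 //
 //   withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
 //     return CGF.EmitCallExpr(E, Slot);
 //   });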
66
67public:
68 AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
69 : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
70 IsResultUnused(IsResultUnused) { }
71
72 //===--------------------------------------------------------------------===//
73 // Utilities
74 //===--------------------------------------------------------------------===//
75
76 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
77 /// represents a value lvalue, this method emits the address of the lvalue,
78 /// then loads the result into DestPtr.
79 void EmitAggLoadOfLValue(const Expr *E);
80
81 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
82 /// SrcIsRValue is true if source comes from an RValue.
83 void EmitFinalDestCopy(QualType type, const LValue &src,
84 CodeGenFunction::ExprValueKind SrcValueKind =
85 CodeGenFunction::EVK_NonRValue);
86 void EmitFinalDestCopy(QualType type, RValue src);
87 void EmitCopy(QualType type, const AggValueSlot &dest,
88 const AggValueSlot &src);
89
90 void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
91 Expr *ExprToVisit, ArrayRef<Expr *> Args,
92 Expr *ArrayFiller);
93
94 AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
95 if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
96 return AggValueSlot::NeedsGCBarriers;
97 return AggValueSlot::DoesNotNeedGCBarriers;
98 }
99
100 bool TypeRequiresGCollection(QualType T);
101
102 //===--------------------------------------------------------------------===//
103 // Visitor Methods
104 //===--------------------------------------------------------------------===//
105
106 void Visit(Expr *E) {
107 ApplyDebugLocation DL(CGF, E);
108 StmtVisitor<AggExprEmitter>::Visit(E);
109 }
110
111 void VisitStmt(Stmt *S) {
112 CGF.ErrorUnsupported(S, "aggregate expression");
113 }
114 void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
115 void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
116 Visit(GE->getResultExpr());
117 }
118 void VisitCoawaitExpr(CoawaitExpr *E) {
119 CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
120 }
121 void VisitCoyieldExpr(CoyieldExpr *E) {
122 CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
123 }
124 void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
125 void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
126 void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
127 return Visit(E->getReplacement());
128 }
129
130 void VisitConstantExpr(ConstantExpr *E) {
131 EnsureDest(E->getType());
132
133 if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
134 CGF.CreateCoercedStore(
135 Result, Dest.getAddress(),
136 llvm::TypeSize::getFixed(
137 Dest.getPreferredSize(CGF.getContext(), E->getType())
138 .getQuantity()),
139 E->getType().isVolatileQualified());
140 return;
141 }
142 return Visit(E->getSubExpr());
143 }
144
145 // l-values.
146 void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
147 void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
148 void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
149 void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
150 void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
151 void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
152 EmitAggLoadOfLValue(E);
153 }
154 void VisitPredefinedExpr(const PredefinedExpr *E) {
155 EmitAggLoadOfLValue(E);
156 }
157
158 // Operators.
159 void VisitCastExpr(CastExpr *E);
160 void VisitCallExpr(const CallExpr *E);
161 void VisitStmtExpr(const StmtExpr *E);
162 void VisitBinaryOperator(const BinaryOperator *BO);
163 void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
164 void VisitBinAssign(const BinaryOperator *E);
165 void VisitBinComma(const BinaryOperator *E);
166 void VisitBinCmp(const BinaryOperator *E);
167 void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
168 Visit(E->getSemanticForm());
169 }
170
171 void VisitObjCMessageExpr(ObjCMessageExpr *E);
172 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
173 EmitAggLoadOfLValue(E);
174 }
175
176 void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
177 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
178 void VisitChooseExpr(const ChooseExpr *CE);
179 void VisitInitListExpr(InitListExpr *E);
180 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
181 FieldDecl *InitializedFieldInUnion,
182 Expr *ArrayFiller);
183 void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
184 llvm::Value *outerBegin = nullptr);
185 void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
186 void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
187 void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
188 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
189 Visit(DAE->getExpr());
190 }
191 void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
192 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
193 Visit(DIE->getExpr());
194 }
195 void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
196 void VisitCXXConstructExpr(const CXXConstructExpr *E);
197 void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
198 void VisitLambdaExpr(LambdaExpr *E);
199 void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
200 void VisitExprWithCleanups(ExprWithCleanups *E);
201 void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
202 void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
203 void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
204 void VisitOpaqueValueExpr(OpaqueValueExpr *E);
205
206 void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
207 if (E->isGLValue()) {
208 LValue LV = CGF.EmitPseudoObjectLValue(E);
209 return EmitFinalDestCopy(E->getType(), LV);
210 }
211
212 AggValueSlot Slot = EnsureSlot(E->getType());
213 bool NeedsDestruction =
214 !Slot.isExternallyDestructed() &&
215 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
216 if (NeedsDestruction)
217 Slot.setExternallyDestructed();
218 CGF.EmitPseudoObjectRValue(E, Slot);
219 if (NeedsDestruction)
220 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
221 E->getType());
222 }
223
224 void VisitVAArgExpr(VAArgExpr *E);
225 void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
226 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
227 Expr *ArrayFiller);
228
229 void EmitInitializationToLValue(Expr *E, LValue Address);
230 void EmitNullInitializationToLValue(LValue Address);
231 // case Expr::ChooseExprClass:
232 void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
233 void VisitAtomicExpr(AtomicExpr *E) {
234 RValue Res = CGF.EmitAtomicExpr(E);
235 EmitFinalDestCopy(E->getType(), Res);
236 }
237 void VisitPackIndexingExpr(PackIndexingExpr *E) {
238 Visit(E->getSelectedExpr());
239 }
240};
241} // end anonymous namespace.
242
243//===----------------------------------------------------------------------===//
244// Utilities
245//===----------------------------------------------------------------------===//
246
247/// EmitAggLoadOfLValue - Given an expression with aggregate type that
248/// represents a value lvalue, this method emits the address of the lvalue,
249/// then loads the result into DestPtr.
250void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
251 LValue LV = CGF.EmitLValue(E);
252
253 // If the type of the l-value is atomic, then do an atomic load.
254 if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
255 CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
256 return;
257 }
258
259 EmitFinalDestCopy(E->getType(), LV);
260}
261
262/// True if the given aggregate type requires special GC API calls.
263bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
264 // Only record types have members that might require garbage collection.
265 const RecordType *RecordTy = T->getAs<RecordType>();
266 if (!RecordTy) return false;
267
268 // Don't mess with non-trivial C++ types.
269 RecordDecl *Record = RecordTy->getDecl();
270 if (isa<CXXRecordDecl>(Record) &&
271 (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
272 !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
273 return false;
274
275 // Check whether the type has an object member.
276 return Record->hasObjectMember();
277}
278
279void AggExprEmitter::withReturnValueSlot(
280 const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
281 QualType RetTy = E->getType();
282 bool RequiresDestruction =
283 !Dest.isExternallyDestructed() &&
284 RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;
285
286 // If it makes no observable difference, save a memcpy + temporary.
287 //
288 // We need to always provide our own temporary if destruction is required.
289 // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
290 // its lifetime before we have the chance to emit a proper destructor call.
291 bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
292 (RequiresDestruction && Dest.isIgnored());
293
294 Address RetAddr = Address::invalid();
295 RawAddress RetAllocaAddr = RawAddress::invalid();
296
297 EHScopeStack::stable_iterator LifetimeEndBlock;
298 llvm::Value *LifetimeSizePtr = nullptr;
299 llvm::IntrinsicInst *LifetimeStartInst = nullptr;
300 if (!UseTemp) {
301 RetAddr = Dest.getAddress();
302 } else {
303 RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
304 llvm::TypeSize Size =
305 CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
306 LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
307 if (LifetimeSizePtr) {
308 LifetimeStartInst =
309 cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
310 assert(LifetimeStartInst->getIntrinsicID() ==
311 llvm::Intrinsic::lifetime_start &&
312 "Last insertion wasn't a lifetime.start?");
313
314 CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
315 NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
316 LifetimeEndBlock = CGF.EHStack.stable_begin();
317 }
318 }
319
320 RValue Src =
321 EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
322 Dest.isExternallyDestructed()));
323
324 if (!UseTemp)
325 return;
326
327 assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
328 Src.getAggregatePointer(E->getType(), CGF));
329 EmitFinalDestCopy(E->getType(), Src);
330
331 if (!RequiresDestruction && LifetimeStartInst) {
332 // If there's no dtor to run, the copy was the last use of our temporary.
333 // Since we're not guaranteed to be in an ExprWithCleanups, clean up
334 // eagerly.
335 CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
336 CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
337 }
338}
339
340/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
341void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
342 assert(src.isAggregate() && "value must be aggregate value!");
343 LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
344 EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
345}
346
347/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
348void AggExprEmitter::EmitFinalDestCopy(
349 QualType type, const LValue &src,
350 CodeGenFunction::ExprValueKind SrcValueKind) {
351 // If Dest is ignored, then we're evaluating an aggregate expression
352 // in a context that doesn't care about the result. Note that loads
353 // from volatile l-values force the existence of a non-ignored
354 // destination.
355 if (Dest.isIgnored())
356 return;
357
358 // Copy non-trivial C structs here.
359 LValue DstLV = CGF.MakeAddrLValue(
360 Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
361
362 if (SrcValueKind == CodeGenFunction::EVK_RValue) {
363 if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
364 if (Dest.isPotentiallyAliased())
365 CGF.callCStructMoveAssignmentOperator(DstLV, src);
366 else
367 CGF.callCStructMoveConstructor(DstLV, src);
368 return;
369 }
370 } else {
371 if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
372 if (Dest.isPotentiallyAliased())
373 CGF.callCStructCopyAssignmentOperator(DstLV, src);
374 else
375 CGF.callCStructCopyConstructor(DstLV, src);
376 return;
377 }
378 }
379
380 AggValueSlot srcAgg = AggValueSlot::forLValue(
381 src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,
382 AggValueSlot::MayOverlap);
383 EmitCopy(type, Dest, srcAgg);
384}
385
386/// Perform a copy from the source into the destination.
387///
388/// \param type - the type of the aggregate being copied; qualifiers are
389/// ignored
390void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
391 const AggValueSlot &src) {
392 if (dest.requiresGCollection()) {
393 CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
394 llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
395 CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
396 dest.getAddress(),
397 src.getAddress(),
398 size);
399 return;
400 }
401
402 // If the result of the assignment is used, copy the LHS there also.
403 // It's volatile if either side is. Use the minimum alignment of
404 // the two sides.
405 LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
406 LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
407 CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
408 dest.isVolatile() || src.isVolatile());
409}
410
411/// Emit the initializer for a std::initializer_list initialized with a
412/// real initializer list.
413void
414AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
415 // Emit an array containing the elements. The array is externally destructed
416 // if the std::initializer_list object is.
417 ASTContext &Ctx = CGF.getContext();
418 LValue Array = CGF.EmitLValue(E->getSubExpr());
419 assert(Array.isSimple() && "initializer_list array not a simple lvalue");
420 Address ArrayPtr = Array.getAddress();
421
422 const ConstantArrayType *ArrayType =
423 Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
424 assert(ArrayType && "std::initializer_list constructed from non-array");
425
426 RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
427 RecordDecl::field_iterator Field = Record->field_begin();
428 assert(Field != Record->field_end() &&
429 Ctx.hasSameType(Field->getType()->getPointeeType(),
431 "Expected std::initializer_list first field to be const E *");
432
433 // Start pointer.
434 AggValueSlot Dest = EnsureSlot(E->getType());
435 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
436 LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
437 llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
438 CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
439 ++Field;
440 assert(Field != Record->field_end() &&
441 "Expected std::initializer_list to have two fields");
442
443 llvm::Value *Size = Builder.getInt(ArrayType->getSize());
444 LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
445 if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
446 // Length.
447 CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
448
449 } else {
450 // End pointer.
451 assert(Field->getType()->isPointerType() &&
452 Ctx.hasSameType(Field->getType()->getPointeeType(),
454 "Expected std::initializer_list second field to be const E *");
455 llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
456 llvm::Value *IdxEnd[] = { Zero, Size };
457 llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
458 ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
459 "arrayend");
460 CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
461 }
462
463 assert(++Field == Record->field_end() &&
464 "Expected std::initializer_list to only have two fields");
465}
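// For illustration only: for `std::initializer_list<int> il = {1, 2, 3};` the
// code above behaves roughly like
//
//   int backing[3] = {1, 2, 3};
//   il.<first field>  = backing;     // start pointer
//   il.<second field> = 3;           // size_t length, or `backing + 3`
//                                    // if the second field is an end pointer
//
// The field names are not assumed; only the layout (pointer + size, or
// pointer + pointer) is, exactly as the asserts above check.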
466
467/// Determine if E is a trivial array filler, that is, one that is
468/// equivalent to zero-initialization.
469static bool isTrivialFiller(Expr *E) {
470 if (!E)
471 return true;
472
473 if (isa<ImplicitValueInitExpr>(E))
474 return true;
475
476 if (auto *ILE = dyn_cast<InitListExpr>(E)) {
477 if (ILE->getNumInits())
478 return false;
479 return isTrivialFiller(ILE->getArrayFiller());
480 }
481
482 if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
483 return Cons->getConstructor()->isDefaultConstructor() &&
484 Cons->getConstructor()->isTrivial();
485
486 // FIXME: Are there other cases where we can avoid emitting an initializer?
487 return false;
488}
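// For example, in `int a[8] = {1, 2};` the implicit filler for elements
// a[2]..a[7] is an ImplicitValueInitExpr, which is equivalent to
// zero-initialization, so EmitArrayInit below can skip emitting it when the
// destination memory is already zeroed.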
489
490/// Emit initialization of an array from an initializer list. ExprToVisit must
491/// be either an InitListExpr or a CXXParenInitListExpr.
492void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
493 QualType ArrayQTy, Expr *ExprToVisit,
494 ArrayRef<Expr *> Args, Expr *ArrayFiller) {
495 uint64_t NumInitElements = Args.size();
496
497 uint64_t NumArrayElements = AType->getNumElements();
498 for (const auto *Init : Args) {
499 if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
500 NumInitElements += Embed->getDataElementCount() - 1;
501 if (NumInitElements > NumArrayElements) {
502 NumInitElements = NumArrayElements;
503 break;
504 }
505 }
506 }
507
508 assert(NumInitElements <= NumArrayElements);
509
510 QualType elementType =
511 CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
512 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
513 CharUnits elementAlign =
514 DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
515 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
516
517 // Consider initializing the array by copying from a global. For this to be
518 // more efficient than per-element initialization, the size of the elements
519 // with explicit initializers should be large enough.
520 if (NumInitElements * elementSize.getQuantity() > 16 &&
521 elementType.isTriviallyCopyableType(CGF.getContext())) {
522 CodeGen::CodeGenModule &CGM = CGF.CGM;
523 ConstantEmitter Emitter(CGF);
524 QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
525 CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
526 CGM.GetGlobalConstantAddressSpace());
527 LangAS AS = GVArrayQTy.getAddressSpace();
528 if (llvm::Constant *C =
529 Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
530 auto GV = new llvm::GlobalVariable(
531 CGM.getModule(), C->getType(),
532 /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
533 "constinit",
534 /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
535 CGM.getContext().getTargetAddressSpace(AS));
536 Emitter.finalize(GV);
537 CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
538 GV->setAlignment(Align.getAsAlign());
539 Address GVAddr(GV, GV->getValueType(), Align);
540 EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
541 return;
542 }
543 }
544
545 // Exception safety requires us to destroy all the
546 // already-constructed members if an initializer throws.
547 // For that, we'll need an EH cleanup.
548 QualType::DestructionKind dtorKind = elementType.isDestructedType();
549 Address endOfInit = Address::invalid();
550 CodeGenFunction::CleanupDeactivationScope deactivation(CGF);
551
552 llvm::Value *begin = DestPtr.emitRawPointer(CGF);
553 if (dtorKind) {
554 CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
555 // In principle we could tell the cleanup where we are more
556 // directly, but the control flow can get so varied here that it
557 // would actually be quite complex. Therefore we go through an
558 // alloca.
559 llvm::Instruction *dominatingIP =
560 Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
561 endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
562 "arrayinit.endOfInit");
563 Builder.CreateStore(begin, endOfInit);
564 CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
565 elementAlign,
566 CGF.getDestroyer(dtorKind));
567 cast<EHCleanupScope>(*CGF.EHStack.find(CGF.EHStack.stable_begin()))
568 .AddAuxAllocas(allocaTracker.Take());
569
570 CGF.DeferredDeactivationCleanupStack.push_back(
571 {CGF.EHStack.stable_begin(), dominatingIP});
572 }
573
574 llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
575
576 auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
577 llvm::Value *element = begin;
578 if (ArrayIndex > 0) {
579 element = Builder.CreateInBoundsGEP(
580 llvmElementType, begin,
581 llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");
582
583 // Tell the cleanup that it needs to destroy up to this
584 // element. TODO: some of these stores can be trivially
585 // observed to be unnecessary.
586 if (endOfInit.isValid())
587 Builder.CreateStore(element, endOfInit);
588 }
589
590 LValue elementLV = CGF.MakeAddrLValue(
591 Address(element, llvmElementType, elementAlign), elementType);
592 EmitInitializationToLValue(Init, elementLV);
593 return true;
594 };
595
596 unsigned ArrayIndex = 0;
597 // Emit the explicit initializers.
598 for (uint64_t i = 0; i != NumInitElements; ++i) {
599 if (ArrayIndex >= NumInitElements)
600 break;
601 if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
602 EmbedS->doForEachDataElement(Emit, ArrayIndex);
603 } else {
604 Emit(Args[i], ArrayIndex);
605 ArrayIndex++;
606 }
607 }
608
609 // Check whether there's a non-trivial array-fill expression.
610 bool hasTrivialFiller = isTrivialFiller(ArrayFiller);
611
612 // Any remaining elements need to be zero-initialized, possibly
613 // using the filler expression. We can skip this if we're
614 // emitting to zeroed memory.
615 if (NumInitElements != NumArrayElements &&
616 !(Dest.isZeroed() && hasTrivialFiller &&
617 CGF.getTypes().isZeroInitializable(elementType))) {
618
619 // Use an actual loop. This is basically
620 // do { *array++ = filler; } while (array != end);
621
622 // Advance to the start of the rest of the array.
623 llvm::Value *element = begin;
624 if (NumInitElements) {
625 element = Builder.CreateInBoundsGEP(
626 llvmElementType, element,
627 llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
628 "arrayinit.start");
629 if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
630 }
631
632 // Compute the end of the array.
633 llvm::Value *end = Builder.CreateInBoundsGEP(
634 llvmElementType, begin,
635 llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");
636
637 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
638 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
639
640 // Jump into the body.
641 CGF.EmitBlock(bodyBB);
642 llvm::PHINode *currentElement =
643 Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
644 currentElement->addIncoming(element, entryBB);
645
646 // Emit the actual filler expression.
647 {
648 // C++1z [class.temporary]p5:
649 // when a default constructor is called to initialize an element of
650 // an array with no corresponding initializer [...] the destruction of
651 // every temporary created in a default argument is sequenced before
652 // the construction of the next array element, if any
653 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
654 LValue elementLV = CGF.MakeAddrLValue(
655 Address(currentElement, llvmElementType, elementAlign), elementType);
656 if (ArrayFiller)
657 EmitInitializationToLValue(ArrayFiller, elementLV);
658 else
659 EmitNullInitializationToLValue(elementLV);
660 }
661
662 // Move on to the next element.
663 llvm::Value *nextElement = Builder.CreateInBoundsGEP(
664 llvmElementType, currentElement, one, "arrayinit.next");
665
666 // Tell the EH cleanup that we finished with the last element.
667 if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
668
669 // Leave the loop if we're done.
670 llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
671 "arrayinit.done");
672 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
673 Builder.CreateCondBr(done, endBB, bodyBB);
674 currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
675
676 CGF.EmitBlock(endBB);
677 }
678}
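// Two illustrative cases of the strategies above (a sketch, not from the
// original source): for
//
//   int xs[6] = {1, 2, 3, 4, 5, 6};
//
// the explicit initializers are trivially copyable and larger than 16 bytes,
// so they are typically emitted as a private "constinit" global plus a copy,
// whereas for
//
//   struct S { S(); ~S(); };
//   S ss[4] = {};
//
// the filler runs in the do/while-style loop, and the "arrayinit.endOfInit"
// pointer keeps the EH cleanup informed so already-constructed elements are
// destroyed if a later constructor throws.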
679
680//===----------------------------------------------------------------------===//
681// Visitor Methods
682//===----------------------------------------------------------------------===//
683
684void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
685 Visit(E->getSubExpr());
686}
687
688void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
689 // If this is a unique OVE, just visit its source expression.
690 if (e->isUnique())
691 Visit(e->getSourceExpr());
692 else
693 EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
694}
695
696void
697AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
698 if (Dest.isPotentiallyAliased() &&
699 E->getType().isPODType(CGF.getContext())) {
700 // For a POD type, just emit a load of the lvalue + a copy, because our
701 // compound literal might alias the destination.
702 EmitAggLoadOfLValue(E);
703 return;
704 }
705
706 AggValueSlot Slot = EnsureSlot(E->getType());
707
708 // Block-scope compound literals are destroyed at the end of the enclosing
709 // scope in C.
710 bool Destruct =
711 !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
712 if (Destruct)
713 Slot.setExternallyDestructed();
714
715 CGF.EmitAggExpr(E->getInitializer(), Slot);
716
717 if (Destruct)
718 if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
719 CGF.pushLifetimeExtendedDestroy(
720 CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
721 CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
722}
723
724/// Attempt to look through various unimportant expressions to find a
725/// cast of the given kind.
726static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
727 op = op->IgnoreParenNoopCasts(ctx);
728 if (auto castE = dyn_cast<CastExpr>(op)) {
729 if (castE->getCastKind() == kind)
730 return castE->getSubExpr();
731 }
732 return nullptr;
733}
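// Illustrative use (see the CK_NonAtomicToAtomic/CK_AtomicToNonAtomic case
// below): when an expression converts to atomic and immediately back,
// conceptually `(T)(_Atomic T)x`, the inner cast is the reverse of the outer
// one, so both can be dropped and `x` emitted directly.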
734
735void AggExprEmitter::VisitCastExpr(CastExpr *E) {
736 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
737 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
738 switch (E->getCastKind()) {
739 case CK_Dynamic: {
740 // FIXME: Can this actually happen? We have no test coverage for it.
741 assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
742 LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
743 CodeGenFunction::TCK_Load);
744 // FIXME: Do we also need to handle property references here?
745 if (LV.isSimple())
746 CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
747 else
748 CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
749
750 if (!Dest.isIgnored())
751 CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
752 break;
753 }
754
755 case CK_ToUnion: {
756 // Evaluate even if the destination is ignored.
757 if (Dest.isIgnored()) {
758 CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
759 /*ignoreResult=*/true);
760 break;
761 }
762
763 // GCC union extension
764 QualType Ty = E->getSubExpr()->getType();
765 Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
766 EmitInitializationToLValue(E->getSubExpr(),
767 CGF.MakeAddrLValue(CastPtr, Ty));
768 break;
769 }
770
771 case CK_LValueToRValueBitCast: {
772 if (Dest.isIgnored()) {
773 CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
774 /*ignoreResult=*/true);
775 break;
776 }
777
778 LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
779 Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
780 Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
781 llvm::Value *SizeVal = llvm::ConstantInt::get(
782 CGF.SizeTy,
783 CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
784 Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
785 break;
786 }
787
788 case CK_DerivedToBase:
789 case CK_BaseToDerived:
790 case CK_UncheckedDerivedToBase: {
791 llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
792 "should have been unpacked before we got here");
793 }
794
795 case CK_NonAtomicToAtomic:
796 case CK_AtomicToNonAtomic: {
797 bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
798
799 // Determine the atomic and value types.
800 QualType atomicType = E->getSubExpr()->getType();
801 QualType valueType = E->getType();
802 if (isToAtomic) std::swap(atomicType, valueType);
803
804 assert(atomicType->isAtomicType());
805 assert(CGF.getContext().hasSameUnqualifiedType(valueType,
806 atomicType->castAs<AtomicType>()->getValueType()));
807
808 // Just recurse normally if we're ignoring the result or the
809 // atomic type doesn't change representation.
810 if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
811 return Visit(E->getSubExpr());
812 }
813
814 CastKind peepholeTarget =
815 (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
816
817 // These two cases are reverses of each other; try to peephole them.
818 if (Expr *op =
819 findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
820 assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
821 E->getType()) &&
822 "peephole significantly changed types?");
823 return Visit(op);
824 }
825
826 // If we're converting an r-value of non-atomic type to an r-value
827 // of atomic type, just emit directly into the relevant sub-object.
828 if (isToAtomic) {
829 AggValueSlot valueDest = Dest;
830 if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
831 // Zero-initialize. (Strictly speaking, we only need to initialize
832 // the padding at the end, but this is simpler.)
833 if (!Dest.isZeroed())
834 CGF.EmitNullInitialization(Dest.getAddress(), atomicType);
835
836 // Build a GEP to refer to the subobject.
837 Address valueAddr =
838 CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
839 valueDest = AggValueSlot::forAddr(valueAddr,
840 valueDest.getQualifiers(),
841 valueDest.isExternallyDestructed(),
842 valueDest.requiresGCollection(),
843 valueDest.isPotentiallyAliased(),
844 AggValueSlot::DoesNotOverlap,
845 AggValueSlot::IsZeroed);
846 }
847
848 CGF.EmitAggExpr(E->getSubExpr(), valueDest);
849 return;
850 }
851
852 // Otherwise, we're converting an atomic type to a non-atomic type.
853 // Make an atomic temporary, emit into that, and then copy the value out.
854 AggValueSlot atomicSlot =
855 CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
856 CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
857
858 Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
859 RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
860 return EmitFinalDestCopy(valueType, rvalue);
861 }
862 case CK_AddressSpaceConversion:
863 return Visit(E->getSubExpr());
864
865 case CK_LValueToRValue:
866 // If we're loading from a volatile type, force the destination
867 // into existence.
868 if (E->getSubExpr()->getType().isVolatileQualified()) {
869 bool Destruct =
870 !Dest.isExternallyDestructed() &&
871 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
872 if (Destruct)
873 Dest.setExternallyDestructed();
874 EnsureDest(E->getType());
875 Visit(E->getSubExpr());
876
877 if (Destruct)
878 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
879 E->getType());
880
881 return;
882 }
883
884 [[fallthrough]];
885
886 case CK_HLSLArrayRValue:
887 Visit(E->getSubExpr());
888 break;
889
890 case CK_NoOp:
891 case CK_UserDefinedConversion:
892 case CK_ConstructorConversion:
893 assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
894 E->getType()) &&
895 "Implicit cast types must be compatible");
896 Visit(E->getSubExpr());
897 break;
898
899 case CK_LValueBitCast:
900 llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
901
902 case CK_Dependent:
903 case CK_BitCast:
904 case CK_ArrayToPointerDecay:
905 case CK_FunctionToPointerDecay:
906 case CK_NullToPointer:
907 case CK_NullToMemberPointer:
908 case CK_BaseToDerivedMemberPointer:
909 case CK_DerivedToBaseMemberPointer:
910 case CK_MemberPointerToBoolean:
911 case CK_ReinterpretMemberPointer:
912 case CK_IntegralToPointer:
913 case CK_PointerToIntegral:
914 case CK_PointerToBoolean:
915 case CK_ToVoid:
916 case CK_VectorSplat:
917 case CK_IntegralCast:
918 case CK_BooleanToSignedIntegral:
919 case CK_IntegralToBoolean:
920 case CK_IntegralToFloating:
921 case CK_FloatingToIntegral:
922 case CK_FloatingToBoolean:
923 case CK_FloatingCast:
924 case CK_CPointerToObjCPointerCast:
925 case CK_BlockPointerToObjCPointerCast:
926 case CK_AnyPointerToBlockPointerCast:
927 case CK_ObjCObjectLValueCast:
928 case CK_FloatingRealToComplex:
929 case CK_FloatingComplexToReal:
930 case CK_FloatingComplexToBoolean:
931 case CK_FloatingComplexCast:
932 case CK_FloatingComplexToIntegralComplex:
933 case CK_IntegralRealToComplex:
934 case CK_IntegralComplexToReal:
935 case CK_IntegralComplexToBoolean:
936 case CK_IntegralComplexCast:
937 case CK_IntegralComplexToFloatingComplex:
938 case CK_ARCProduceObject:
939 case CK_ARCConsumeObject:
940 case CK_ARCReclaimReturnedObject:
941 case CK_ARCExtendBlockObject:
942 case CK_CopyAndAutoreleaseBlockObject:
943 case CK_BuiltinFnToFnPtr:
944 case CK_ZeroToOCLOpaqueType:
945 case CK_MatrixCast:
946 case CK_HLSLVectorTruncation:
947
948 case CK_IntToOCLSampler:
949 case CK_FloatingToFixedPoint:
950 case CK_FixedPointToFloating:
951 case CK_FixedPointCast:
952 case CK_FixedPointToBoolean:
953 case CK_FixedPointToIntegral:
954 case CK_IntegralToFixedPoint:
955 llvm_unreachable("cast kind invalid for aggregate types");
956 }
957}
958
959void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
960 if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
961 EmitAggLoadOfLValue(E);
962 return;
963 }
964
965 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
966 return CGF.EmitCallExpr(E, Slot);
967 });
968}
969
970void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
971 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
972 return CGF.EmitObjCMessageExpr(E, Slot);
973 });
974}
975
976void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
977 CGF.EmitIgnoredExpr(E->getLHS());
978 Visit(E->getRHS());
979}
980
981void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
982 CodeGenFunction::StmtExprEvaluation eval(CGF);
983 CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
984}
985
986enum CompareKind {
987 CK_Less,
988 CK_Greater,
989 CK_Equal
990};
991
992static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
993 const BinaryOperator *E, llvm::Value *LHS,
994 llvm::Value *RHS, CompareKind Kind,
995 const char *NameSuffix = "") {
996 QualType ArgTy = E->getLHS()->getType();
997 if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
998 ArgTy = CT->getElementType();
999
1000 if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
1001 assert(Kind == CK_Equal &&
1002 "member pointers may only be compared for equality");
1003 return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
1004 CGF, LHS, RHS, MPT, /*IsInequality*/ false);
1005 }
1006
1007 // Compute the comparison instructions for the specified comparison kind.
1008 struct CmpInstInfo {
1009 const char *Name;
1010 llvm::CmpInst::Predicate FCmp;
1011 llvm::CmpInst::Predicate SCmp;
1012 llvm::CmpInst::Predicate UCmp;
1013 };
1014 CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
1015 using FI = llvm::FCmpInst;
1016 using II = llvm::ICmpInst;
1017 switch (Kind) {
1018 case CK_Less:
1019 return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
1020 case CK_Greater:
1021 return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
1022 case CK_Equal:
1023 return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
1024 }
1025 llvm_unreachable("Unrecognised CompareKind enum");
1026 }();
1027
1028 if (ArgTy->hasFloatingRepresentation())
1029 return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
1030 llvm::Twine(InstInfo.Name) + NameSuffix);
1031 if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
1032 auto Inst =
1033 ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
1034 return Builder.CreateICmp(Inst, LHS, RHS,
1035 llvm::Twine(InstInfo.Name) + NameSuffix);
1036 }
1037
1038 llvm_unreachable("unsupported aggregate binary expression should have "
1039 "already been handled");
1040}
1041
1042void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
1043 using llvm::BasicBlock;
1044 using llvm::PHINode;
1045 using llvm::Value;
1046 assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
1047 E->getRHS()->getType()));
1048 const ComparisonCategoryInfo &CmpInfo =
1049 CGF.getContext().CompCategories.getInfoForType(E->getType());
1050 assert(CmpInfo.Record->isTriviallyCopyable() &&
1051 "cannot copy non-trivially copyable aggregate");
1052
1053 QualType ArgTy = E->getLHS()->getType();
1054
1055 if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
1056 !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
1057 !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
1058 return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
1059 }
1060 bool IsComplex = ArgTy->isAnyComplexType();
1061
1062 // Evaluate the operands to the expression and extract their values.
1063 auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
1064 RValue RV = CGF.EmitAnyExpr(E);
1065 if (RV.isScalar())
1066 return {RV.getScalarVal(), nullptr};
1067 if (RV.isAggregate())
1068 return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
1069 assert(RV.isComplex());
1070 return RV.getComplexVal();
1071 };
1072 auto LHSValues = EmitOperand(E->getLHS()),
1073 RHSValues = EmitOperand(E->getRHS());
1074
1075 auto EmitCmp = [&](CompareKind K) {
1076 Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
1077 K, IsComplex ? ".r" : "");
1078 if (!IsComplex)
1079 return Cmp;
1080 assert(K == CompareKind::CK_Equal);
1081 Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
1082 RHSValues.second, K, ".i");
1083 return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
1084 };
1085 auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
1086 return Builder.getInt(VInfo->getIntValue());
1087 };
1088
1089 Value *Select;
1090 if (ArgTy->isNullPtrType()) {
1091 Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
1092 } else if (!CmpInfo.isPartial()) {
1093 Value *SelectOne =
1094 Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
1095 EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
1096 Select = Builder.CreateSelect(EmitCmp(CK_Equal),
1097 EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1098 SelectOne, "sel.eq");
1099 } else {
1100 Value *SelectEq = Builder.CreateSelect(
1101 EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1102 EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
1103 Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
1104 EmitCmpRes(CmpInfo.getGreater()),
1105 SelectEq, "sel.gt");
1106 Select = Builder.CreateSelect(
1107 EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
1108 }
1109 // Create the return value in the destination slot.
1110 EnsureDest(E->getType());
1111 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1112
1113 // Emit the address of the first (and only) field in the comparison category
1114 // type, and initialize it from the constant integer value selected above.
1115 LValue FieldLV = CGF.EmitLValueForFieldInitialization(
1116 DestLV, *CmpInfo.Record->field_begin());
1117 CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);
1118
1119 // All done! The result is in the Dest slot.
1120}
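// For illustration, for `auto r = a <=> b;` with int operands (a total,
// non-partial ordering), the selects above compute roughly
//
//   r = (a == b) ? equal : ((a < b) ? less : greater);
//
// where `equal`, `less` and `greater` are the integer constants of the
// comparison category, and the chosen value is stored into the category
// object's single integer field in the destination slot.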
1121
1122void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
1123 if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
1124 VisitPointerToDataMemberBinaryOperator(E);
1125 else
1126 CGF.ErrorUnsupported(E, "aggregate binary expression");
1127}
1128
1129void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
1130 const BinaryOperator *E) {
1131 LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
1132 EmitFinalDestCopy(E->getType(), LV);
1133}
1134
1135/// Is the value of the given expression possibly a reference to or
1136/// into a __block variable?
1137static bool isBlockVarRef(const Expr *E) {
1138 // Make sure we look through parens.
1139 E = E->IgnoreParens();
1140
1141 // Check for a direct reference to a __block variable.
1142 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
1143 const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
1144 return (var && var->hasAttr<BlocksAttr>());
1145 }
1146
1147 // More complicated stuff.
1148
1149 // Binary operators.
1150 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
1151 // For an assignment or pointer-to-member operation, just care
1152 // about the LHS.
1153 if (op->isAssignmentOp() || op->isPtrMemOp())
1154 return isBlockVarRef(op->getLHS());
1155
1156 // For a comma, just care about the RHS.
1157 if (op->getOpcode() == BO_Comma)
1158 return isBlockVarRef(op->getRHS());
1159
1160 // FIXME: pointer arithmetic?
1161 return false;
1162
1163 // Check both sides of a conditional operator.
1164 } else if (const AbstractConditionalOperator *op
1165 = dyn_cast<AbstractConditionalOperator>(E)) {
1166 return isBlockVarRef(op->getTrueExpr())
1167 || isBlockVarRef(op->getFalseExpr());
1168
1169 // OVEs are required to support BinaryConditionalOperators.
1170 } else if (const OpaqueValueExpr *op
1171 = dyn_cast<OpaqueValueExpr>(E)) {
1172 if (const Expr *src = op->getSourceExpr())
1173 return isBlockVarRef(src);
1174
1175 // Casts are necessary to get things like (*(int*)&var) = foo().
1176 // We don't really care about the kind of cast here, except
1177 // we don't want to look through l2r casts, because it's okay
1178 // to get the *value* in a __block variable.
1179 } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
1180 if (cast->getCastKind() == CK_LValueToRValue)
1181 return false;
1182 return isBlockVarRef(cast->getSubExpr());
1183
1184 // Handle unary operators. Again, just aggressively look through
1185 // it, ignoring the operation.
1186 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
1187 return isBlockVarRef(uop->getSubExpr());
1188
1189 // Look into the base of a field access.
1190 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
1191 return isBlockVarRef(mem->getBase());
1192
1193 // Look into the base of a subscript.
1194 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
1195 return isBlockVarRef(sub->getBase());
1196 }
1197
1198 return false;
1199}
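// The hazard this guards against (see VisitBinAssign below), as a sketch with
// an illustrative function name:
//
//   __block Agg x;
//   x = computeAndCaptureInBlock();   // RHS may copy the enclosing block
//
// If the RHS can trigger a block copy, the storage backing `x` may move, so
// the RHS has to be evaluated before the address of the LHS is computed.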
1200
1201void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1202 // For an assignment to work, the value on the right has
1203 // to be compatible with the value on the left.
1204 assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
1205 E->getRHS()->getType())
1206 && "Invalid assignment");
1207
1208 // If the LHS might be a __block variable, and the RHS can
1209 // potentially cause a block copy, we need to evaluate the RHS first
1210 // so that the assignment goes the right place.
1211 // This is pretty semantically fragile.
1212 if (isBlockVarRef(E->getLHS()) &&
1213 E->getRHS()->HasSideEffects(CGF.getContext())) {
1214 // Ensure that we have a destination, and evaluate the RHS into that.
1215 EnsureDest(E->getRHS()->getType());
1216 Visit(E->getRHS());
1217
1218 // Now emit the LHS and copy into it.
1219 LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
1220
1221 // That copy is an atomic copy if the LHS is atomic.
1222 if (LHS.getType()->isAtomicType() ||
1223 CGF.LValueIsSuitableForInlineAtomic(LHS)) {
1224 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1225 return;
1226 }
1227
1228 EmitCopy(E->getLHS()->getType(),
1229 AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
1230 needsGC(E->getLHS()->getType()),
1231 AggValueSlot::IsAliased,
1232 AggValueSlot::MayOverlap),
1233 Dest);
1234 return;
1235 }
1236
1237 LValue LHS = CGF.EmitLValue(E->getLHS());
1238
1239 // If we have an atomic type, evaluate into the destination and then
1240 // do an atomic copy.
1241 if (LHS.getType()->isAtomicType() ||
1242 CGF.LValueIsSuitableForInlineAtomic(LHS)) {
1243 EnsureDest(E->getRHS()->getType());
1244 Visit(E->getRHS());
1245 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1246 return;
1247 }
1248
1249 // Codegen the RHS so that it stores directly into the LHS.
1250 AggValueSlot LHSSlot = AggValueSlot::forLValue(
1251 LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
1252 AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
1253 // A non-volatile aggregate destination might have volatile member.
1254 if (!LHSSlot.isVolatile() &&
1255 CGF.hasVolatileMember(E->getLHS()->getType()))
1256 LHSSlot.setVolatile(true);
1257
1258 CGF.EmitAggExpr(E->getRHS(), LHSSlot);
1259
1260 // Copy into the destination if the assignment isn't ignored.
1261 EmitFinalDestCopy(E->getType(), LHS);
1262
1263 if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
1264 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
1265 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
1266 E->getType());
1267}
1268
1269void AggExprEmitter::
1270VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
1271 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1272 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1273 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
1274
1275 // Bind the common expression if necessary.
1276 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
1277
1278 CodeGenFunction::ConditionalEvaluation eval(CGF);
1279 CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
1280 CGF.getProfileCount(E));
1281
1282 // Save whether the destination's lifetime is externally managed.
1283 bool isExternallyDestructed = Dest.isExternallyDestructed();
1284 bool destructNonTrivialCStruct =
1285 !isExternallyDestructed &&
1286 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
1287 isExternallyDestructed |= destructNonTrivialCStruct;
1288 Dest.setExternallyDestructed(isExternallyDestructed);
1289
1290 eval.begin(CGF);
1291 CGF.EmitBlock(LHSBlock);
1292 if (llvm::EnableSingleByteCoverage)
1293 CGF.incrementProfileCounter(E->getTrueExpr());
1294 else
1295 CGF.incrementProfileCounter(E);
1296 Visit(E->getTrueExpr());
1297 eval.end(CGF);
1298
1299 assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
1300 CGF.Builder.CreateBr(ContBlock);
1301
1302 // If the result of an agg expression is unused, then the emission
1303 // of the LHS might need to create a destination slot. That's fine
1304 // with us, and we can safely emit the RHS into the same slot, but
1305 // we shouldn't claim that it's already being destructed.
1306 Dest.setExternallyDestructed(isExternallyDestructed);
1307
1308 eval.begin(CGF);
1309 CGF.EmitBlock(RHSBlock);
1310 if (llvm::EnableSingleByteCoverage)
1311 CGF.incrementProfileCounter(E->getFalseExpr());
1312 Visit(E->getFalseExpr());
1313 eval.end(CGF);
1314
1315 if (destructNonTrivialCStruct)
1316 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
1317 E->getType());
1318
1319 CGF.EmitBlock(ContBlock);
1320 if (llvm::EnableSingleByteCoverage)
1321 CGF.incrementProfileCounter(E);
1322}
1323
1324void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
1325 Visit(CE->getChosenSubExpr());
1326}
1327
1328void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1329 Address ArgValue = Address::invalid();
1330 CGF.EmitVAArg(VE, ArgValue, Dest);
1331
1332 // If EmitVAArg fails, emit an error.
1333 if (!ArgValue.isValid()) {
1334 CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
1335 return;
1336 }
1337}
1338
1339void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1340 // Ensure that we have a slot, but if we already do, remember
1341 // whether it was externally destructed.
1342 bool wasExternallyDestructed = Dest.isExternallyDestructed();
1343 EnsureDest(E->getType());
1344
1345 // We're going to push a destructor if there isn't already one.
1346 Dest.setExternallyDestructed();
1347
1348 Visit(E->getSubExpr());
1349
1350 // Push that destructor we promised.
1351 if (!wasExternallyDestructed)
1352 CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1353}
1354
1355void
1356AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1357 AggValueSlot Slot = EnsureSlot(E->getType());
1358 CGF.EmitCXXConstructExpr(E, Slot);
1359}
1360
1361void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1362 const CXXInheritedCtorInitExpr *E) {
1363 AggValueSlot Slot = EnsureSlot(E->getType());
1364 CGF.EmitInheritedCXXConstructorCall(
1365 E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
1366 E->inheritedFromVBase(), E);
1367}
1368
1369void
1370AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1371 AggValueSlot Slot = EnsureSlot(E->getType());
1372 LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1373
1374 // We'll need to enter cleanup scopes in case any of the element
1375 // initializers throws an exception or contains a branch out of the expression.
1376 CodeGenFunction::CleanupDeactivationScope scope(CGF);
1377
1378 CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1379 for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
1380 e = E->capture_init_end();
1381 i != e; ++i, ++CurField) {
1382 // Emit initialization
1383 LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1384 if (CurField->hasCapturedVLAType()) {
1385 CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1386 continue;
1387 }
1388
1389 EmitInitializationToLValue(*i, LV);
1390
1391 // Push a destructor if necessary.
1392 if (QualType::DestructionKind DtorKind =
1393 CurField->getType().isDestructedType()) {
1394 assert(LV.isSimple());
1395 if (DtorKind)
1396 CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
1397 CurField->getType(),
1398 CGF.getDestroyer(DtorKind), false);
1399 }
1400 }
1401}
1402
1403void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1404 CodeGenFunction::RunCleanupsScope cleanups(CGF);
1405 Visit(E->getSubExpr());
1406}
1407
1408void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1409 QualType T = E->getType();
1410 AggValueSlot Slot = EnsureSlot(T);
1411 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1412}
1413
1414void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1415 QualType T = E->getType();
1416 AggValueSlot Slot = EnsureSlot(T);
1417 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1418}
1419
1420/// Determine whether the given cast kind is known to always convert values
1421/// with all zero bits in their value representation to values with all zero
1422/// bits in their value representation.
1423static bool castPreservesZero(const CastExpr *CE) {
1424 switch (CE->getCastKind()) {
1425 // No-ops.
1426 case CK_NoOp:
1427 case CK_UserDefinedConversion:
1428 case CK_ConstructorConversion:
1429 case CK_BitCast:
1430 case CK_ToUnion:
1431 case CK_ToVoid:
1432 // Conversions between (possibly-complex) integral, (possibly-complex)
1433 // floating-point, and bool.
1434 case CK_BooleanToSignedIntegral:
1435 case CK_FloatingCast:
1436 case CK_FloatingComplexCast:
1437 case CK_FloatingComplexToBoolean:
1438 case CK_FloatingComplexToIntegralComplex:
1439 case CK_FloatingComplexToReal:
1440 case CK_FloatingRealToComplex:
1441 case CK_FloatingToBoolean:
1442 case CK_FloatingToIntegral:
1443 case CK_IntegralCast:
1444 case CK_IntegralComplexCast:
1445 case CK_IntegralComplexToBoolean:
1446 case CK_IntegralComplexToFloatingComplex:
1447 case CK_IntegralComplexToReal:
1448 case CK_IntegralRealToComplex:
1449 case CK_IntegralToBoolean:
1450 case CK_IntegralToFloating:
1451 // Reinterpreting integers as pointers and vice versa.
1452 case CK_IntegralToPointer:
1453 case CK_PointerToIntegral:
1454 // Language extensions.
1455 case CK_VectorSplat:
1456 case CK_MatrixCast:
1457 case CK_NonAtomicToAtomic:
1458 case CK_AtomicToNonAtomic:
1459 case CK_HLSLVectorTruncation:
1460 return true;
1461
1462 case CK_BaseToDerivedMemberPointer:
1463 case CK_DerivedToBaseMemberPointer:
1464 case CK_MemberPointerToBoolean:
1465 case CK_NullToMemberPointer:
1466 case CK_ReinterpretMemberPointer:
1467 // FIXME: ABI-dependent.
1468 return false;
1469
1470 case CK_AnyPointerToBlockPointerCast:
1471 case CK_BlockPointerToObjCPointerCast:
1472 case CK_CPointerToObjCPointerCast:
1473 case CK_ObjCObjectLValueCast:
1474 case CK_IntToOCLSampler:
1475 case CK_ZeroToOCLOpaqueType:
1476 // FIXME: Check these.
1477 return false;
1478
1479 case CK_FixedPointCast:
1480 case CK_FixedPointToBoolean:
1481 case CK_FixedPointToFloating:
1482 case CK_FixedPointToIntegral:
1483 case CK_FloatingToFixedPoint:
1484 case CK_IntegralToFixedPoint:
1485 // FIXME: Do all fixed-point types represent zero as all 0 bits?
1486 return false;
1487
1488 case CK_AddressSpaceConversion:
1489 case CK_BaseToDerived:
1490 case CK_DerivedToBase:
1491 case CK_Dynamic:
1492 case CK_NullToPointer:
1493 case CK_PointerToBoolean:
1494 // FIXME: Preserves zeroes only if zero pointers and null pointers have the
1495 // same representation in all involved address spaces.
1496 return false;
1497
1498 case CK_ARCConsumeObject:
1499 case CK_ARCExtendBlockObject:
1500 case CK_ARCProduceObject:
1501 case CK_ARCReclaimReturnedObject:
1502 case CK_CopyAndAutoreleaseBlockObject:
1503 case CK_ArrayToPointerDecay:
1504 case CK_FunctionToPointerDecay:
1505 case CK_BuiltinFnToFnPtr:
1506 case CK_Dependent:
1507 case CK_LValueBitCast:
1508 case CK_LValueToRValue:
1509 case CK_LValueToRValueBitCast:
1510 case CK_UncheckedDerivedToBase:
1511 case CK_HLSLArrayRValue:
1512 return false;
1513 }
1514 llvm_unreachable("Unhandled clang::CastKind enum");
1515}
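// For example, `(long)(short)0` only involves CK_IntegralCast, which keeps an
// all-zero-bits value representation, so isSimpleZero() below can still treat
// the initializer as a simple zero.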
1516
1517/// isSimpleZero - If emitting this value will obviously just cause a store of
1518/// zero to memory, return true. This can return false if uncertain, so it just
1519/// handles simple cases.
1520static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1521 E = E->IgnoreParens();
1522 while (auto *CE = dyn_cast<CastExpr>(E)) {
1523 if (!castPreservesZero(CE))
1524 break;
1525 E = CE->getSubExpr()->IgnoreParens();
1526 }
1527
1528 // 0
1529 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1530 return IL->getValue() == 0;
1531 // +0.0
1532 if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1533 return FL->getValue().isPosZero();
1534 // int()
1535 if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1536 CGF.getTypes().isZeroInitializable(E->getType()))
1537 return true;
1538 // (int*)0 - Null pointer expressions.
1539 if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1540 return ICE->getCastKind() == CK_NullToPointer &&
1541 CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
1542 !E->HasSideEffects(CGF.getContext());
1543 // '\0'
1544 if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1545 return CL->getValue() == 0;
1546
1547 // Otherwise, hard case: conservatively return false.
1548 return false;
1549}
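// Concretely, initializers such as 0, +0.0, '\0', (int *)0 and int() are
// recognized here; e.g. for `struct P { int x; float y; } p = {0, 0.0f};`
// both member stores can be skipped when the destination is already zeroed.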
1550
1551
1552void
1553AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1554 QualType type = LV.getType();
1555 // FIXME: Ignore result?
1556 // FIXME: Are initializers affected by volatile?
1557 if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1558 // Storing "i32 0" to a zero'd memory location is a noop.
1559 return;
1560 } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1561 return EmitNullInitializationToLValue(LV);
1562 } else if (isa<NoInitExpr>(E)) {
1563 // Do nothing.
1564 return;
1565 } else if (type->isReferenceType()) {
1566 RValue RV = CGF.EmitReferenceBindingToExpr(E);
1567 return CGF.EmitStoreThroughLValue(RV, LV);
1568 }
1569
1570 switch (CGF.getEvaluationKind(type)) {
1571 case TEK_Complex:
1572 CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
1573 return;
1574 case TEK_Aggregate:
1575 CGF.EmitAggExpr(
1576 E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
1577 AggValueSlot::DoesNotNeedGCBarriers,
1578 AggValueSlot::IsNotAliased,
1579 AggValueSlot::MayOverlap, Dest.isZeroed()));
1580 return;
1581 case TEK_Scalar:
1582 if (LV.isSimple()) {
1583 CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
1584 } else {
1585 CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
1586 }
1587 return;
1588 }
1589 llvm_unreachable("bad evaluation kind");
1590}
1591
1592void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1593 QualType type = lv.getType();
1594
1595 // If the destination slot is already zeroed out before the aggregate is
1596 // copied into it, we don't have to emit any zeros here.
1597 if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1598 return;
1599
1600 if (CGF.hasScalarEvaluationKind(type)) {
1601 // For non-aggregates, we can store the appropriate null constant.
1602 llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1603 // Note that the following is not equivalent to
1604 // EmitStoreThroughBitfieldLValue for ARC types.
1605 if (lv.isBitField()) {
1606 CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
1607 } else {
1608 assert(lv.isSimple());
1609 CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1610 }
1611 } else {
1612 // There's a potential optimization opportunity in combining
1613 // memsets; that would be easy for arrays, but relatively
1614 // difficult for structures with the current code.
1615 CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
1616 }
1617}
1618
1619void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
1620 VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
1621 E->getInitializedFieldInUnion(),
1622 E->getArrayFiller());
1623}
1624
1625void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1626 if (E->hadArrayRangeDesignator())
1627 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1628
1629 if (E->isTransparent())
1630 return Visit(E->getInit(0));
1631
1632 VisitCXXParenListOrInitListExpr(
1633 E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
1634}
1635
1636void AggExprEmitter::VisitCXXParenListOrInitListExpr(
1637 Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
1638 FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
1639#if 0
1640 // FIXME: Assess perf here? Figure out what cases are worth optimizing here
1641 // (Length of globals? Chunks of zeroed-out space?).
1642 //
1643 // If we can, prefer a copy from a global; this is a lot less code for long
1644 // globals, and it's easier for the current optimizers to analyze.
1645 if (llvm::Constant *C =
1646 CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
1647 llvm::GlobalVariable* GV =
1648 new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1649 llvm::GlobalValue::InternalLinkage, C, "");
1650 EmitFinalDestCopy(ExprToVisit->getType(),
1651 CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
1652 return;
1653 }
1654#endif
1655
1656 AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());
1657
1658 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
1659
1660 // Handle initialization of an array.
1661 if (ExprToVisit->getType()->isConstantArrayType()) {
1662 auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1663 EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
1664 InitExprs, ArrayFiller);
1665 return;
1666 } else if (ExprToVisit->getType()->isVariableArrayType()) {
1667 // A variable array type that has an initializer can only do empty
1668 // initialization. And because this feature is not exposed as an extension
1669 // in C++, we can safely memset the array memory to zero.
1670 assert(InitExprs.size() == 0 &&
1671 "you can only use an empty initializer with VLAs");
1672 CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
1673 return;
1674 }
1675
1676 assert(ExprToVisit->getType()->isRecordType() &&
1677 "Only support structs/unions here!");
1678
1679 // Do struct initialization; this code just sets each individual member
1680 // to the appropriate value. This makes bitfield support automatic;
1681 // the disadvantage is that the generated code is more difficult for
1682 // the optimizer, especially with bitfields.
1683 unsigned NumInitElements = InitExprs.size();
1684 RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();
1685
1686 // We'll need to enter cleanup scopes in case any of the element
1687 // initializers throws an exception.
1689 CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);
1690
1691 unsigned curInitIndex = 0;
1692
1693 // Emit initialization of base classes.
1694 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1695 assert(NumInitElements >= CXXRD->getNumBases() &&
1696 "missing initializer for base class");
1697 for (auto &Base : CXXRD->bases()) {
1698 assert(!Base.isVirtual() && "should not see vbases here");
1699 auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1700 Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
1701 Dest.getAddress(), CXXRD, BaseRD,
1702 /*isBaseVirtual*/ false);
1703 AggValueSlot AggSlot = AggValueSlot::forAddr(
1704 V, Qualifiers(),
1705 AggValueSlot::IsDestructed,
1706 AggValueSlot::DoesNotNeedGCBarriers,
1707 AggValueSlot::IsNotAliased,
1708 CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1709 CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
1710
1711 if (QualType::DestructionKind dtorKind =
1712 Base.getType().isDestructedType())
1713 CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
1714 }
1715 }
1716
1717 // Prepare a 'this' for CXXDefaultInitExprs.
1718 CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1719
1720 if (record->isUnion()) {
1721 // Only initialize one field of a union. The field itself is
1722 // specified by the initializer list.
1723 if (!InitializedFieldInUnion) {
1724 // Empty union; we have nothing to do.
1725
1726#ifndef NDEBUG
1727 // Make sure that it's really an empty union and not a failure of
1728 // semantic analysis.
1729 for (const auto *Field : record->fields())
1730 assert(
1731 (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
1732 "Only unnamed bitfields or anonymous class allowed");
1733#endif
1734 return;
1735 }
1736
1737 // FIXME: volatility
1738 FieldDecl *Field = InitializedFieldInUnion;
1739
1740 LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1741 if (NumInitElements) {
1742 // Store the initializer into the field
1743 EmitInitializationToLValue(InitExprs[0], FieldLoc);
1744 } else {
1745 // Default-initialize to null.
1746 EmitNullInitializationToLValue(FieldLoc);
1747 }
1748
1749 return;
1750 }
1751
1752 // Here we iterate over the fields; this makes it simpler to both
1753 // default-initialize fields and skip over unnamed fields.
1754 for (const auto *field : record->fields()) {
1755 // We're done once we hit the flexible array member.
1756 if (field->getType()->isIncompleteArrayType())
1757 break;
1758
1759 // Always skip anonymous bitfields.
1760 if (field->isUnnamedBitField())
1761 continue;
1762
1763 // We're done if we reach the end of the explicit initializers, we
1764 // have a zeroed object, and the rest of the fields are
1765 // zero-initializable.
1766 if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1767 CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
1768 break;
1769
1770
1771 LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1772 // We never generate write-barriers for initialized fields.
1773 LV.setNonGC(true);
1774
1775 if (curInitIndex < NumInitElements) {
1776 // Store the initializer into the field.
1777 EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
1778 } else {
1779 // We're out of initializers; default-initialize to null
1780 EmitNullInitializationToLValue(LV);
1781 }
1782
1783 // Push a destructor if necessary.
1784 // FIXME: if we have an array of structures, all explicitly
1785 // initialized, we can end up pushing a linear number of cleanups.
1786 if (QualType::DestructionKind dtorKind
1787 = field->getType().isDestructedType()) {
1788 assert(LV.isSimple());
1789 if (dtorKind) {
1790 CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
1791 field->getType(),
1792 CGF.getDestroyer(dtorKind), false);
1793 }
1794 }
1795 }
1796}
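// For instance, with the (hypothetical) declarations
//
//   struct T { unsigned a : 3; unsigned b : 5; int c; };
//   T t = {1, 2};
//
// the field loop above stores 1 and 2 through bitfield lvalues for 'a' and
// 'b'; 'c' has no explicit initializer, so it is either null-initialized or,
// if the slot is already zeroed and the remaining fields are
// zero-initializable, the loop simply stops early.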
1797
1798void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1799 llvm::Value *outerBegin) {
1800 // Emit the common subexpression.
1801 CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1802
1803 Address destPtr = EnsureSlot(E->getType()).getAddress();
1804 uint64_t numElements = E->getArraySize().getZExtValue();
1805
1806 if (!numElements)
1807 return;
1808
1809 // destPtr is an array*. Construct an elementType* by drilling down a level.
1810 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1811 llvm::Value *indices[] = {zero, zero};
1812 llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
1813 destPtr.emitRawPointer(CGF),
1814 indices, "arrayinit.begin");
1815
1816 // Prepare to special-case multidimensional array initialization: we avoid
1817 // emitting multiple destructor loops in that case.
1818 if (!outerBegin)
1819 outerBegin = begin;
1820 ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
1821
1822 QualType elementType =
1823 CGF.getContext().getAsArrayType(E->getType())->getElementType();
1824 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1825 CharUnits elementAlign =
1826 destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1827 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
1828
1829 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1830 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1831
1832 // Jump into the body.
1833 CGF.EmitBlock(bodyBB);
1834 llvm::PHINode *index =
1835 Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1836 index->addIncoming(zero, entryBB);
1837 llvm::Value *element =
1838 Builder.CreateInBoundsGEP(llvmElementType, begin, index);
1839
1840 // Prepare for a cleanup.
1841 QualType::DestructionKind dtorKind = elementType.isDestructedType();
1842 EHScopeStack::stable_iterator cleanup;
1843 if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1844 if (outerBegin->getType() != element->getType())
1845 outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
1846 CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
1847 elementAlign,
1848 CGF.getDestroyer(dtorKind));
1849 cleanup = CGF.EHStack.stable_begin();
1850 } else {
1851 dtorKind = QualType::DK_none;
1852 }
1853
1854 // Emit the actual filler expression.
1855 {
1856 // Temporaries created in an array initialization loop are destroyed
1857 // at the end of each iteration.
1858 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
1859 CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
1860 LValue elementLV = CGF.MakeAddrLValue(
1861 Address(element, llvmElementType, elementAlign), elementType);
1862
1863 if (InnerLoop) {
1864 // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
1865 auto elementSlot = AggValueSlot::forLValue(
1866 elementLV, AggValueSlot::IsDestructed,
1867 AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
1868 AggValueSlot::DoesNotOverlap);
1869 AggExprEmitter(CGF, elementSlot, false)
1870 .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
1871 } else
1872 EmitInitializationToLValue(E->getSubExpr(), elementLV);
1873 }
1874
1875 // Move on to the next element.
1876 llvm::Value *nextIndex = Builder.CreateNUWAdd(
1877 index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
1878 index->addIncoming(nextIndex, Builder.GetInsertBlock());
1879
1880 // Leave the loop if we're done.
1881 llvm::Value *done = Builder.CreateICmpEQ(
1882 nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
1883 "arrayinit.done");
1884 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
1885 Builder.CreateCondBr(done, endBB, bodyBB);
1886
1887 CGF.EmitBlock(endBB);
1888
1889 // Leave the partial-array cleanup if we entered one.
1890 if (dtorKind)
1891 CGF.DeactivateCleanupBlock(cleanup, index);
1892}
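// An ArrayInitLoopExpr typically appears when an array must be initialized
// element by element from another array, e.g. in the implicitly-defined copy
// constructor of a (hypothetical)
//
//   struct A { std::string names[8]; };
//
// or when a lambda captures an array by value. The loop emitted above
// initializes each element in turn; for element types with non-trivial
// destructors, the partial-array cleanup ensures that already-constructed
// elements are destroyed if a later element's initializer throws.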
1893
1894void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1895 AggValueSlot Dest = EnsureSlot(E->getType());
1896
1897 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1898 EmitInitializationToLValue(E->getBase(), DestLV);
1899 VisitInitListExpr(E->getUpdater());
1900}
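// A DesignatedInitUpdateExpr can arise, for example, when a C designator
// overwrites part of an earlier, non-constant initializer (hypothetical):
//
//   struct Inner { int a, b; };
//   struct Outer { struct Inner in; };
//   struct Inner make(void);
//   struct Outer o = { .in = make(), .in.b = 7 };
//
// The base initializer (the call to make()) is emitted into the destination
// first, and the updater list then overwrites just the designated subobject.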
1901
1902//===----------------------------------------------------------------------===//
1903// Entry Points into this File
1904//===----------------------------------------------------------------------===//
1905
1906/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1907/// non-zero bytes that will be stored when outputting the initializer for the
1908/// specified initializer expression.
1909static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1910 if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1911 E = MTE->getSubExpr();
1912 E = E->IgnoreParenNoopCasts(CGF.getContext());
1913
1914 // 0 and 0.0 won't require any non-zero stores!
1915 if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1916
1917 // If this is an initlist expr, sum up the sizes of the (present)
1918 // elements. If this is something weird, assume the whole thing is non-zero.
1919 const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1920 while (ILE && ILE->isTransparent())
1921 ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
1922 if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1923 return CGF.getContext().getTypeSizeInChars(E->getType());
1924
1925 // InitListExprs for structs have to be handled carefully. If there are
1926 // reference members, we need to consider the size of the reference, not the
1927 // referencee. InitListExprs for unions and arrays can't have references.
1928 if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1929 if (!RT->isUnionType()) {
1930 RecordDecl *SD = RT->getDecl();
1931 CharUnits NumNonZeroBytes = CharUnits::Zero();
1932
1933 unsigned ILEElement = 0;
1934 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
1935 while (ILEElement != CXXRD->getNumBases())
1936 NumNonZeroBytes +=
1937 GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
1938 for (const auto *Field : SD->fields()) {
1939 // We're done once we hit the flexible array member or run out of
1940 // InitListExpr elements.
1941 if (Field->getType()->isIncompleteArrayType() ||
1942 ILEElement == ILE->getNumInits())
1943 break;
1944 if (Field->isUnnamedBitField())
1945 continue;
1946
1947 const Expr *E = ILE->getInit(ILEElement++);
1948
1949 // Reference values are always non-null and have the width of a pointer.
1950 if (Field->getType()->isReferenceType())
1951 NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1952 CGF.getTarget().getPointerWidth(LangAS::Default));
1953 else
1954 NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1955 }
1956
1957 return NumNonZeroBytes;
1958 }
1959 }
1960
1961 // FIXME: This overestimates the number of non-zero bytes for bit-fields.
1962 CharUnits NumNonZeroBytes = CharUnits::Zero();
1963 for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1964 NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
1965 return NumNonZeroBytes;
1966}
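// Worked example (hypothetical struct): for
//
//   struct Big { int first; int rest[63]; };   // 256 bytes
//   struct Big b = { 5 };
//
// only the 4-byte store of 'first' is non-zero, so this returns 4 bytes;
// a reference member, by contrast, always contributes a full pointer width
// because the stored address is never zero.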
1967
1968/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1969/// zeros in it, emit a memset and avoid storing the individual zeros.
1970///
1971static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1972 CodeGenFunction &CGF) {
1973 // If the slot is already known to be zeroed, nothing to do. Don't mess with
1974 // volatile stores.
1975 if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
1976 return;
1977
1978 // C++ objects with a user-declared constructor don't need zero'ing.
1979 if (CGF.getLangOpts().CPlusPlus)
1980 if (const RecordType *RT = CGF.getContext()
1981 .getBaseElementType(E->getType())->getAs<RecordType>()) {
1982 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1983 if (RD->hasUserDeclaredConstructor())
1984 return;
1985 }
1986
1987 // If the type is 16 bytes or smaller, prefer individual stores over memset.
1988 CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
1989 if (Size <= CharUnits::fromQuantity(16))
1990 return;
1991
1992 // Check to see if over 3/4 of the initializer is known to be zero. If so,
1993 // we prefer to emit memset + individual stores for the rest.
1994 CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
1995 if (NumNonZeroBytes*4 > Size)
1996 return;
1997
1998 // Okay, it seems like a good idea to use an initial memset, emit the call.
1999 llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
2000
2001 Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
2002 CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
2003
2004 // Tell the AggExprEmitter that the slot is known zero.
2005 Slot.setZeroed();
2006}
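// Continuing the example above: the slot for 'struct Big b = { 5 };' is
// 256 bytes (> 16), and only 4 bytes are known non-zero (4 * 4 <= 256), so a
// single 256-byte memset is emitted here and the store of 5 is the only
// per-field store that follows.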
2007
2008
2009
2010
2011/// EmitAggExpr - Emit the computation of the specified expression of aggregate
2012/// type. The result is computed into DestPtr. Note that if DestPtr is null,
2013/// the value of the aggregate expression is not needed. If VolatileDest is
2014/// true, DestPtr cannot be 0.
2015void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
2016 assert(E && hasAggregateEvaluationKind(E->getType()) &&
2017 "Invalid aggregate expression to emit");
2018 assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
2019 "slot has bits but no address");
2020
2021 // Optimize the slot if possible.
2022 CheckAggExprForMemSetUse(Slot, E, *this);
2023
2024 AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
2025}
2026
2027LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
2028 assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
2029 Address Temp = CreateMemTemp(E->getType());
2030 LValue LV = MakeAddrLValue(Temp, E->getType());
2031 EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
2032 AggValueSlot::DoesNotNeedGCBarriers,
2033 AggValueSlot::IsNotAliased,
2034 AggValueSlot::DoesNotOverlap));
2035 return LV;
2036}
2037
2038void CodeGenFunction::EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest,
2039 const LValue &Src,
2040 ExprValueKind SrcKind) {
2041 return AggExprEmitter(*this, Dest, Dest.isIgnored())
2042 .EmitFinalDestCopy(Type, Src, SrcKind);
2043}
2044
2045AggValueSlot::Overlap_t
2046CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
2047 if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2048 return AggValueSlot::DoesNotOverlap;
2049
2050 // Empty fields can overlap earlier fields.
2051 if (FD->getType()->getAsCXXRecordDecl()->isEmpty())
2052 return AggValueSlot::MayOverlap;
2053
2054 // If the field lies entirely within the enclosing class's nvsize, its tail
2055 // padding cannot overlap any already-initialized object. (The only subobjects
2056 // with greater addresses that might already be initialized are vbases.)
2057 const RecordDecl *ClassRD = FD->getParent();
2058 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
2059 if (Layout.getFieldOffset(FD->getFieldIndex()) +
2060 getContext().getTypeSize(FD->getType()) <=
2061 (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
2062 return AggValueSlot::DoesNotOverlap;
2063
2064 // The tail padding may contain values we need to preserve.
2065 return AggValueSlot::MayOverlap;
2066}
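// Example (hypothetical class):
//
//   struct Empty {};
//   struct S {
//     [[no_unique_address]] Empty e1, e2;   // may share storage
//     int x;
//   };
//
// the empty no_unique_address fields report MayOverlap, so stores emitted on
// their behalf must not assume exclusive use of their bytes, whereas a field
// that lies entirely within the class's non-virtual size reports
// DoesNotOverlap and can be initialized at full width, tail padding included.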
2067
2068AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
2069 const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
2070 // If the most-derived object is a field declared with [[no_unique_address]],
2071 // the tail padding of any virtual base could be reused for other subobjects
2072 // of that field's class.
2073 if (IsVirtual)
2074 return AggValueSlot::MayOverlap;
2075
2076 // Empty bases can overlap earlier bases.
2077 if (BaseRD->isEmpty())
2078 return AggValueSlot::MayOverlap;
2079
2080 // If the base class is laid out entirely within the nvsize of the derived
2081 // class, its tail padding cannot yet be initialized, so we can issue
2082 // stores at the full width of the base class.
2083 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2084 if (Layout.getBaseClassOffset(BaseRD) +
2085 getContext().getASTRecordLayout(BaseRD).getSize() <=
2086 Layout.getNonVirtualSize())
2087 return AggValueSlot::DoesNotOverlap;
2088
2089 // The tail padding may contain values we need to preserve.
2090 return AggValueSlot::MayOverlap;
2091}
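// Example (hypothetical hierarchy):
//
//   struct B { B(); int i; char c; };   // sizeof(B) == 8, data size == 5
//   struct D : B { char d; };           // 'd' may reuse B's tail padding
//
// the B subobject still lies within D's non-virtual size and 'd' is not yet
// initialized when the base is constructed, so DoesNotOverlap is returned and
// the base can be initialized at its full width; virtual bases and empty
// bases, whose storage may already be shared, report MayOverlap instead.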
2092
2093void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
2094 AggValueSlot::Overlap_t MayOverlap,
2095 bool isVolatile) {
2096 assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
2097
2098 Address DestPtr = Dest.getAddress();
2099 Address SrcPtr = Src.getAddress();
2100
2101 if (getLangOpts().CPlusPlus) {
2102 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2103 CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
2104 assert((Record->hasTrivialCopyConstructor() ||
2105 Record->hasTrivialCopyAssignment() ||
2106 Record->hasTrivialMoveConstructor() ||
2107 Record->hasTrivialMoveAssignment() ||
2108 Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
2109 "Trying to aggregate-copy a type without a trivial copy/move "
2110 "constructor or assignment operator");
2111 // Ignore empty classes in C++.
2112 if (Record->isEmpty())
2113 return;
2114 }
2115 }
2116
2117 if (getLangOpts().CUDAIsDevice) {
2118 if (Ty->isCUDADeviceBuiltinSurfaceType()) {
2119 if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
2120 Src))
2121 return;
2122 } else if (Ty->isCUDADeviceBuiltinTextureType()) {
2123 if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
2124 Src))
2125 return;
2126 }
2127 }
2128
2129 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
2130 // C99 6.5.16.1p3, which states "If the value being stored in an object is
2131 // read from another object that overlaps in any way the storage of the first
2132 // object, then the overlap shall be exact and the two objects shall have
2133 // qualified or unqualified versions of a compatible type."
2134 //
2135 // memcpy is not defined if the source and destination pointers are exactly
2136 // equal, but other compilers do this optimization, and almost every memcpy
2137 // implementation handles this case safely. If there is a libc that does not
2138 // safely handle this, we can add a target hook.
2139
2140 // Get data size info for this aggregate. Don't copy the tail padding if this
2141 // might be a potentially-overlapping subobject, since the tail padding might
2142 // be occupied by a different object. Otherwise, copying it is fine.
2143 TypeInfoChars TypeInfo;
2144 if (MayOverlap)
2145 TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
2146 else
2147 TypeInfo = getContext().getTypeInfoInChars(Ty);
2148
2149 llvm::Value *SizeVal = nullptr;
2150 if (TypeInfo.Width.isZero()) {
2151 // But note that getTypeInfo returns 0 for a VLA.
2152 if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
2153 getContext().getAsArrayType(Ty))) {
2154 QualType BaseEltTy;
2155 SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
2156 TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
2157 assert(!TypeInfo.Width.isZero());
2158 SizeVal = Builder.CreateNUWMul(
2159 SizeVal,
2160 llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
2161 }
2162 }
2163 if (!SizeVal) {
2164 SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
2165 }
2166
2167 // FIXME: If we have a volatile struct, the optimizer can remove what might
2168 // appear to be `extra' memory ops:
2169 //
2170 // volatile struct { int i; } a, b;
2171 //
2172 // int main() {
2173 // a = b;
2174 // a = b;
2175 // }
2176 //
2177 // we need to use a different call here. We use isVolatile to indicate when
2178 // either the source or the destination is volatile.
2179
2180 DestPtr = DestPtr.withElementType(Int8Ty);
2181 SrcPtr = SrcPtr.withElementType(Int8Ty);
2182
2183 // Don't do any of the memmove_collectable tests if GC isn't set.
2184 if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
2185 // fall through
2186 } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
2187 RecordDecl *Record = RecordTy->getDecl();
2188 if (Record->hasObjectMember()) {
2189 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2190 SizeVal);
2191 return;
2192 }
2193 } else if (Ty->isArrayType()) {
2194 QualType BaseType = getContext().getBaseElementType(Ty);
2195 if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
2196 if (RecordTy->getDecl()->hasObjectMember()) {
2197 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2198 SizeVal);
2199 return;
2200 }
2201 }
2202 }
2203
2204 auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2205
2206 // Determine the metadata to describe the position of any padding in this
2207 // memcpy, as well as the TBAA tags for the members of the struct, in case
2208 // the optimizer wishes to expand it in to scalar memory operations.
2209 if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2210 Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2211
2212 if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2213 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
2214 Dest.getTBAAInfo(), Src.getTBAAInfo());
2215 CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2216 }
2217}
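// For a trivially-copyable assignment such as (hypothetical)
//
//   struct S { int i; float f; } a, b;
//   void copy(void) { a = b; }
//
// this emits roughly the following IR (details vary by target and options):
//
//   call void @llvm.memcpy.p0.p0.i64(ptr align 4 @a, ptr align 4 @b,
//                                    i64 8, i1 false), !tbaa.struct !N
//
// where the !tbaa.struct metadata lets the optimizer later split the memcpy
// back into scalar loads and stores without losing aliasing information.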