clang 23.0.0git
CGExprAgg.cpp
Go to the documentation of this file.
1//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGDebugInfo.h"
15#include "CGHLSLRuntime.h"
16#include "CGObjCRuntime.h"
17#include "CGRecordLayout.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "EHScopeStack.h"
22#include "TargetInfo.h"
24#include "clang/AST/Attr.h"
25#include "clang/AST/DeclCXX.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/GlobalVariable.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/IntrinsicInst.h"
33#include "llvm/IR/Intrinsics.h"
34using namespace clang;
35using namespace CodeGen;
36
37//===----------------------------------------------------------------------===//
38// Aggregate Expression Emitter
39//===----------------------------------------------------------------------===//
40
41namespace {
/// Visitor which emits an expression of aggregate type into a destination
/// AggValueSlot, dispatching on the dynamic class of the expression via
/// StmtVisitor. One instance is created per aggregate emission.
/// NOTE(review): several statements in this chunk were lost when the file
/// was extracted; gaps are marked inline rather than guessed at.
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;  // Function we are emitting IR into.
  CGBuilderTy &Builder;  // Shorthand for CGF.Builder.
  AggValueSlot Dest;     // Destination slot; may be "ignored".
  bool IsResultUnused;   // True when the caller discards the result.

  /// Return Dest, or a fresh temporary slot when Dest is ignored.
  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored())
      return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  /// Like EnsureSlot, but replaces the member Dest in place.
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored())
      return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

  // Zero-initializes padding preceding NextField; exact offset semantics
  // live in the definition, which is outside this chunk — TODO confirm.
  void DoZeroInitPadding(uint64_t &PaddingStart, uint64_t PaddingEnd,
                         const FieldDecl *NextField);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
      : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
        IsResultUnused(IsResultUnused) {}

  //===--------------------------------------------------------------------===//
  // Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
  // NOTE(review): the remainder of this declaration (its trailing
  // parameter(s)) was lost in extraction.
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
                     Expr *ExprToVisit, ArrayRef<Expr *> Args,
                     Expr *ArrayFiller);

  /// Whether copies of T must go through the ObjC GC barrier APIs.
  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
    // NOTE(review): the return statements of this function were lost in
    // extraction.
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    // Attach the expression's source location to everything emitted below.
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) { CGF.ErrorUnsupported(S, "aggregate expression"); }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    EnsureDest(E->getType());

    // Prefer emitting the expression as a constant store when possible.
    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      // NOTE(review): the head of the store call below (and a trailing
      // argument line) was lost in extraction.
          Result, E->getType(), Dest.getAddress(),
          llvm::TypeSize::getFixed(
              Dest.getPreferredSize(CGF.getContext(), E->getType())
                  .getQuantity()),
      return;
    }
    // No constant folding possible; emit the subexpression normally.
    return Visit(E->getSubExpr());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) { EmitAggLoadOfLValue(E); }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { EmitAggLoadOfLValue(E); }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) {} // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    AggValueSlot Slot = EnsureSlot(E->getType());
    bool NeedsDestruction =
        !Slot.isExternallyDestructed() &&
        // NOTE(review): the second operand of this && was lost in
        // extraction.
    if (NeedsDestruction)
      // NOTE(review): the guarded statement was lost in extraction.
    CGF.EmitPseudoObjectRValue(E, Slot);
    if (NeedsDestruction)
      // NOTE(review): the head of the guarded call was lost in extraction.
                      E->getType());
  }

  void VisitVAArgExpr(VAArgExpr *E);
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       Expr *ArrayFiller);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
  void VisitPackIndexingExpr(PackIndexingExpr *E) {
    Visit(E->getSelectedExpr());
  }
};
239} // end anonymous namespace.
240
241//===----------------------------------------------------------------------===//
242// Utilities
243//===----------------------------------------------------------------------===//
244
245/// EmitAggLoadOfLValue - Given an expression with aggregate type that
246/// represents a value lvalue, this method emits the address of the lvalue,
247/// then loads the result into DestPtr.
248void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
249 LValue LV = CGF.EmitLValue(E);
250
251 // If the type of the l-value is atomic, then do an atomic load.
252 if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
253 CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
254 return;
255 }
256
257 EmitFinalDestCopy(E->getType(), LV);
258}
259
/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const auto *Record = T->getAsRecordDecl();
  if (!Record)
    return false;

  // Don't mess with non-trivial C++ types.
  // NOTE(review): the opening line of this `if` condition was lost in
  // extraction.
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
276
/// Run `EmitCall` with a valid return-value slot, creating (and later
/// copying from / cleaning up) a temporary when writing directly into
/// `Dest` would be unsafe.
void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      !Dest.isExternallyDestructed() &&
      // NOTE(review): the second operand of this && was lost in extraction.

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  //
  // We also need a temporary if the destination is in a different address space
  // from the alloca AS, to avoid an invalid addrspacecast on the sret pointer.
  // Look through addrspacecasts to avoid unnecessary temps when the
  // destination is already in the alloca AS.
  unsigned SRetAS = CGF.getContext().getTargetAddressSpace(
  // NOTE(review): the argument line of this call was lost in extraction.
  bool DestASMismatch = !Dest.isIgnored() &&
                        RetTy.isTriviallyCopyableType(CGF.getContext()) &&
                        Dest.getAddress()
                            // NOTE(review): a member-access line was lost in
                            // extraction here.
                            ->stripPointerCasts()
                            ->getType()
                            ->getPointerAddressSpace() != SRetAS;
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && Dest.isIgnored()) || DestASMismatch;

  Address RetAddr = Address::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    // Emit straight into the destination; cast the pointer if the sret
    // address space differs from the destination's.
    RetAddr = Dest.getAddress();
    if (RetAddr.isValid() && RetAddr.getAddressSpace() != SRetAS) {
      llvm::Type *SRetPtrTy =
          llvm::PointerType::get(CGF.getLLVMContext(), SRetAS);
      RetAddr = RetAddr.withPointer(
          CGF.performAddrSpaceCast(RetAddr.getBasePointer(), SRetPtrTy),
          RetAddr.isKnownNonNull());
    }
  } else {
    // Emit into a fresh temporary, bracketed with lifetime markers so the
    // backend can reuse the stack slot.
    RetAddr = CGF.CreateMemTempWithoutCast(RetTy, "tmp");
    if (CGF.EmitLifetimeStart(RetAddr.getBasePointer())) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAddr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
                               Dest.isExternallyDestructed()));

  if (!UseTemp)
    return;

  // The temporary must be distinct from the real destination.
  assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
                                 Src.getAggregatePointer(E->getType(), CGF));
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(RetAddr.getBasePointer());
  }
}
353
354/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
355void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
356 assert(src.isAggregate() && "value must be aggregate value!");
357 LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
358 EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
359}
360
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(
    QualType type, const LValue &src,
    CodeGenFunction::ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result. Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == CodeGenFunction::EVK_RValue) {
    // R-value source: a non-trivial C struct may be moved instead of copied.
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    // L-value source: use the copy special members for non-trivial C structs.
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  // Trivial case: fall back to a plain aggregate copy.
  AggValueSlot srcAgg = AggValueSlot::forLValue(
  // NOTE(review): the argument lines of forLValue were lost in extraction.
  EmitCopy(type, Dest, srcAgg);
}
399
/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    // ObjC GC destinations must be written through the runtime's
    // barrier-aware memmove rather than a plain copy.
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    // NOTE(review): the head of the runtime memmove call was lost in
    // extraction.
        src.getAddress(), size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}
422
/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void AggExprEmitter::VisitCXXStdInitializerListExpr(
    CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
  // NOTE(review): the initializer of ArrayType was lost in extraction.
  assert(ArrayType && "std::initializer_list constructed from non-array");

  auto *Record = E->getType()->castAsRecordDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  assert(Field != Record->field_end() &&
         Ctx.hasSameType(Field->getType()->getPointeeType(),
                         ArrayType->getElementType()) &&
         "Expected std::initializer_list first field to be const E *");

  // Start pointer.
  // (This local intentionally shadows the member `Dest` with an ensured
  // slot.)
  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;
  assert(Field != Record->field_end() &&
         "Expected std::initializer_list to have two fields");

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  // The second field is either a size or an end pointer, depending on the
  // library implementation.
  if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);

  } else {
    // End pointer.
    assert(Field->getType()->isPointerType() &&
           Ctx.hasSameType(Field->getType()->getPointeeType(),
                           ArrayType->getElementType()) &&
           "Expected std::initializer_list second field to be const E *");
    llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
    llvm::Value *IdxEnd[] = {Zero, Size};
    llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
        ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
        "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  }

  assert(++Field == Record->field_end() &&
         "Expected std::initializer_list to only have two fields");
}
478
/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  // No filler at all means plain zero-initialization.
  if (!E)
    return true;

  // NOTE(review): the condition guarding this `return true` was lost in
  // extraction.
    return true;

  // An empty init list is trivial iff its own array filler is.
  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  // A trivial default constructor contributes nothing observable.
  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}
501
// emit an elementwise cast where the RHS is a scalar or vector
// or emit an aggregate splat cast
// NOTE(review): the function's signature line (its name and first
// parameter) was lost in extraction.
                                 LValue DestVal,
                                 llvm::Value *SrcVal,
                                 QualType SrcTy,
                                 SourceLocation Loc) {
  // Flatten our destination
  SmallVector<LValue, 16> StoreList;
  CGF.FlattenAccessAndTypeLValue(DestVal, StoreList);

  // A vector source is consumed lane-by-lane; a scalar source is splatted
  // into every flattened destination element.
  bool isVector = false;
  if (auto *VT = SrcTy->getAs<VectorType>()) {
    isVector = true;
    SrcTy = VT->getElementType();
    assert(StoreList.size() <= VT->getNumElements() &&
           "Cannot perform HLSL flat cast when vector source \
object has less elements than flattened destination \
object.");
  }

  for (unsigned I = 0, Size = StoreList.size(); I < Size; I++) {
    LValue DestLVal = StoreList[I];
    llvm::Value *Load =
        isVector ? CGF.Builder.CreateExtractElement(SrcVal, I, "vec.load")
                 : SrcVal;
    llvm::Value *Cast =
        CGF.EmitScalarConversion(Load, SrcTy, DestLVal.getType(), Loc);
    CGF.EmitStoreThroughLValue(RValue::get(Cast), DestLVal);
  }
}
533
// emit a flat cast where the RHS is an aggregate
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal,
                                    LValue SrcVal, SourceLocation Loc) {
  // Flatten our destination
  SmallVector<LValue, 16> StoreList;
  CGF.FlattenAccessAndTypeLValue(DestVal, StoreList);
  // Flatten our src
  // NOTE(review): the declaration of LoadList was lost in extraction.
  CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);

  assert(StoreList.size() <= LoadList.size() &&
         "Cannot perform HLSL elementwise cast when flattened source object \
has less elements than flattened destination object.");
  // apply casts to what we load from LoadList
  // and store result in Dest
  for (unsigned I = 0, E = StoreList.size(); I < E; I++) {
    LValue DestLVal = StoreList[I];
    LValue SrcLVal = LoadList[I];
    RValue RVal = CGF.EmitLoadOfLValue(SrcLVal, Loc);
    assert(RVal.isScalar() && "All flattened source values should be scalars");
    llvm::Value *Val = RVal.getScalarVal();
    llvm::Value *Cast = CGF.EmitScalarConversion(Val, SrcLVal.getType(),
                                                 DestLVal.getType(), Loc);
    CGF.EmitStoreThroughLValue(RValue::get(Cast), DestLVal);
  }
}
560
/// Emit initialization of an array from an initializer list. ExprToVisit must
/// be either an InitListEpxr a CXXParenInitListExpr.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, Expr *ExprToVisit,
                                   ArrayRef<Expr *> Args, Expr *ArrayFiller) {
  uint64_t NumInitElements = Args.size();

  uint64_t NumArrayElements = AType->getNumElements();
  // A single #embed initializer can expand to many data elements; account
  // for that when counting explicitly-initialized elements.
  for (const auto *Init : Args) {
    if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
      NumInitElements += Embed->getDataElementCount() - 1;
      if (NumInitElements > NumArrayElements) {
        NumInitElements = NumArrayElements;
        break;
      }
    }
  }

  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
        CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
    // NOTE(review): the address-space argument of this call was lost in
    // extraction.
    LangAS AS = GVArrayQTy.getAddressSpace();
    if (llvm::Constant *C =
            Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
          "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
      // NOTE(review): the trailing constructor argument was lost in
      // extraction.
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      Address GVAddr(GV, GV->getValueType(), Align);
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  CodeGenFunction::CleanupDeactivationScope deactivation(CGF);

  llvm::Value *begin = DestPtr.emitRawPointer(CGF);
  if (dtorKind) {
    CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    llvm::Instruction *dominatingIP =
        Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    // NOTE(review): the expression this member call applies to was lost in
    // extraction.
        .AddAuxAllocas(allocaTracker.Take());

    // NOTE(review): the head of this call was lost in extraction.
        {CGF.EHStack.stable_begin(), dominatingIP});
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // Shared helper: emit one explicit initializer into element ArrayIndex,
  // keeping the partial-destruction cleanup marker up to date.
  auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
    llvm::Value *element = begin;
    if (ArrayIndex > 0) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, begin,
          llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid())
        Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);
    EmitInitializationToLValue(Init, elementLV);
    return true;
  };

  unsigned ArrayIndex = 0;
  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    if (ArrayIndex >= NumInitElements)
      break;
    if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
      // #embed: one initializer covering several consecutive elements.
      EmbedS->doForEachDataElement(Emit, ArrayIndex);
    } else {
      Emit(Args[i], ArrayIndex);
      ArrayIndex++;
    }
  }

  // Check whether there's a non-trivial array-fill expression.
  bool hasTrivialFiller = isTrivialFiller(ArrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if the we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    llvm::Value *element = begin;
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element,
          llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
          "arrayinit.start");
      if (endOfInit.isValid())
        Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        llvmElementType, begin,
        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV = CGF.MakeAddrLValue(
          Address(currentElement, llvmElementType, elementAlign), elementType);
      if (ArrayFiller)
        EmitInitializationToLValue(ArrayFiller, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement = Builder.CreateInBoundsGEP(
        llvmElementType, currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid())
      Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done =
        Builder.CreateICmpEQ(nextElement, end, "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }
}
752
753//===----------------------------------------------------------------------===//
754// Visitor Methods
755//===----------------------------------------------------------------------===//
756
757void AggExprEmitter::VisitMaterializeTemporaryExpr(
758 MaterializeTemporaryExpr *E) {
759 Visit(E->getSubExpr());
760}
761
762void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
763 // If this is a unique OVE, just visit its source expression.
764 if (e->isUnique())
765 Visit(e->getSourceExpr());
766 else
767 EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
768}
769
/// Emit a compound literal of aggregate type into the destination slot.
void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased()) {
    // Just emit a load of the lvalue + a copy, because our compound literal
    // might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool Destruct =
      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
  if (Destruct)
    // NOTE(review): the guarded statement was lost in extraction.

  CGF.EmitAggExpr(E->getInitializer(), Slot);

  if (Destruct)
    // NOTE(review): the opening lines of the guarded destroy-push call were
    // lost in extraction.
        CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
        CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}
795
796/// Attempt to look through various unimportant expressions to find a
797/// cast of the given kind.
798static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
799 op = op->IgnoreParenNoopCasts(ctx);
800 if (auto castE = dyn_cast<CastExpr>(op)) {
801 if (castE->getCastKind() == kind)
802 return castE->getSubExpr();
803 }
804 return nullptr;
805}
806
// Emit an aggregate-typed cast expression into the destination slot, switching
// on the cast kind. Unsupported/impossible kinds end in llvm_unreachable.
// NOTE(review): this listing is an extraction with gaps — original source
// lines 815, 830, 845, 855, 878, 907, 915-916, 942, 944, 949 and 997 are
// missing here; verify any change against the complete upstream file.
807 void AggExprEmitter::VisitCastExpr(CastExpr *E) {
808 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
809 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
810 switch (E->getCastKind()) {
811 case CK_Dynamic: {
812 // FIXME: Can this actually happen? We have no test coverage for it.
813 assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
814 LValue LV =
816 // FIXME: Do we also need to handle property references here?
817 if (LV.isSimple())
818 CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
819 else
820 CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
821
822 if (!Dest.isIgnored())
823 CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
824 break;
825 }
826
827 case CK_ToUnion: {
828 // Evaluate even if the destination is ignored.
829 if (Dest.isIgnored()) {
831 /*ignoreResult=*/true);
832 break;
833 }
834
835 // GCC union extension
836 QualType Ty = E->getSubExpr()->getType();
837 Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
838 EmitInitializationToLValue(E->getSubExpr(),
839 CGF.MakeAddrLValue(CastPtr, Ty));
840 break;
841 }
842
843 case CK_LValueToRValueBitCast: {
844 if (Dest.isIgnored()) {
846 /*ignoreResult=*/true);
847 break;
848 }
849
// Implement the bit cast as a byte-wise memcpy between the source lvalue
// and the destination slot.
850 LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
851 Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
852 Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
853 llvm::Value *SizeVal = llvm::ConstantInt::get(
854 CGF.SizeTy,
856 Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
857 break;
858 }
859
860 case CK_DerivedToBase:
861 case CK_BaseToDerived:
862 case CK_UncheckedDerivedToBase: {
863 llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
864 "should have been unpacked before we got here");
865 }
866
867 case CK_NonAtomicToAtomic:
868 case CK_AtomicToNonAtomic: {
869 bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
870
871 // Determine the atomic and value types.
872 QualType atomicType = E->getSubExpr()->getType();
873 QualType valueType = E->getType();
874 if (isToAtomic)
875 std::swap(atomicType, valueType);
876
877 assert(atomicType->isAtomicType());
879 valueType, atomicType->castAs<AtomicType>()->getValueType()));
880
881 // Just recurse normally if we're ignoring the result or the
882 // atomic type doesn't change representation.
883 if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
884 return Visit(E->getSubExpr());
885 }
886
887 CastKind peepholeTarget =
888 (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
889
890 // These two cases are reverses of each other; try to peephole them.
891 if (Expr *op =
892 findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
893 assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
894 E->getType()) &&
895 "peephole significantly changed types?");
896 return Visit(op);
897 }
898
899 // If we're converting an r-value of non-atomic type to an r-value
900 // of atomic type, just emit directly into the relevant sub-object.
901 if (isToAtomic) {
902 AggValueSlot valueDest = Dest;
903 if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
904 // Zero-initialize. (Strictly speaking, we only need to initialize
905 // the padding at the end, but this is simpler.)
906 if (!Dest.isZeroed())
908
909 // Build a GEP to refer to the subobject.
910 Address valueAddr =
911 CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
912 valueDest = AggValueSlot::forAddr(
913 valueAddr, valueDest.getQualifiers(),
914 valueDest.isExternallyDestructed(), valueDest.requiresGCollection(),
917 }
918
919 CGF.EmitAggExpr(E->getSubExpr(), valueDest);
920 return;
921 }
922
923 // Otherwise, we're converting an atomic type to a non-atomic type.
924 // Make an atomic temporary, emit into that, and then copy the value out.
925 AggValueSlot atomicSlot =
926 CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
927 CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
928
929 Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
930 RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
931 return EmitFinalDestCopy(valueType, rvalue);
932 }
933 case CK_AddressSpaceConversion:
934 return Visit(E->getSubExpr());
935
936 case CK_LValueToRValue:
937 // If we're loading from a volatile type, force the destination
938 // into existence.
939 if (E->getSubExpr()->getType().isVolatileQualified()) {
940 bool Destruct =
941 !Dest.isExternallyDestructed() &&
943 if (Destruct)
945 EnsureDest(E->getType());
946 Visit(E->getSubExpr());
947
948 if (Destruct)
950 E->getType());
951
952 return;
953 }
954
955 [[fallthrough]];
956
957 case CK_HLSLArrayRValue:
958 Visit(E->getSubExpr());
959 break;
960 case CK_HLSLAggregateSplatCast: {
961 Expr *Src = E->getSubExpr();
962 QualType SrcTy = Src->getType();
963 RValue RV = CGF.EmitAnyExpr(Src);
964 LValue DestLVal = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
965 SourceLocation Loc = E->getExprLoc();
966
967 assert(RV.isScalar() && SrcTy->isScalarType() &&
968 "RHS of HLSL splat cast must be a scalar.");
969 llvm::Value *SrcVal = RV.getScalarVal();
970 EmitHLSLScalarElementwiseAndSplatCasts(CGF, DestLVal, SrcVal, SrcTy, Loc);
971 break;
972 }
973 case CK_HLSLElementwiseCast: {
974 Expr *Src = E->getSubExpr();
975 QualType SrcTy = Src->getType();
976 RValue RV = CGF.EmitAnyExpr(Src);
977 LValue DestLVal = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
978 SourceLocation Loc = E->getExprLoc();
979
980 if (RV.isScalar()) {
981 llvm::Value *SrcVal = RV.getScalarVal();
982 assert(SrcTy->isVectorType() &&
983 "HLSL Elementwise cast doesn't handle splatting.");
984 EmitHLSLScalarElementwiseAndSplatCasts(CGF, DestLVal, SrcVal, SrcTy, Loc);
985 } else {
986 assert(RV.isAggregate() &&
987 "Can't perform HLSL Aggregate cast on a complex type.");
988 Address SrcVal = RV.getAggregateAddress();
989 EmitHLSLElementwiseCast(CGF, DestLVal, CGF.MakeAddrLValue(SrcVal, SrcTy),
990 Loc);
991 }
992 break;
993 }
994 case CK_NoOp:
995 case CK_UserDefinedConversion:
996 case CK_ConstructorConversion:
998 E->getType()) &&
999 "Implicit cast types must be compatible");
1000 Visit(E->getSubExpr());
1001 break;
1002
1003 case CK_LValueBitCast:
1004 llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
1005
// All remaining cast kinds can never produce an aggregate value.
1006 case CK_Dependent:
1007 case CK_BitCast:
1008 case CK_ArrayToPointerDecay:
1009 case CK_FunctionToPointerDecay:
1010 case CK_NullToPointer:
1011 case CK_NullToMemberPointer:
1012 case CK_BaseToDerivedMemberPointer:
1013 case CK_DerivedToBaseMemberPointer:
1014 case CK_MemberPointerToBoolean:
1015 case CK_ReinterpretMemberPointer:
1016 case CK_IntegralToPointer:
1017 case CK_PointerToIntegral:
1018 case CK_PointerToBoolean:
1019 case CK_ToVoid:
1020 case CK_VectorSplat:
1021 case CK_IntegralCast:
1022 case CK_BooleanToSignedIntegral:
1023 case CK_IntegralToBoolean:
1024 case CK_IntegralToFloating:
1025 case CK_FloatingToIntegral:
1026 case CK_FloatingToBoolean:
1027 case CK_FloatingCast:
1028 case CK_CPointerToObjCPointerCast:
1029 case CK_BlockPointerToObjCPointerCast:
1030 case CK_AnyPointerToBlockPointerCast:
1031 case CK_ObjCObjectLValueCast:
1032 case CK_FloatingRealToComplex:
1033 case CK_FloatingComplexToReal:
1034 case CK_FloatingComplexToBoolean:
1035 case CK_FloatingComplexCast:
1036 case CK_FloatingComplexToIntegralComplex:
1037 case CK_IntegralRealToComplex:
1038 case CK_IntegralComplexToReal:
1039 case CK_IntegralComplexToBoolean:
1040 case CK_IntegralComplexCast:
1041 case CK_IntegralComplexToFloatingComplex:
1042 case CK_ARCProduceObject:
1043 case CK_ARCConsumeObject:
1044 case CK_ARCReclaimReturnedObject:
1045 case CK_ARCExtendBlockObject:
1046 case CK_CopyAndAutoreleaseBlockObject:
1047 case CK_BuiltinFnToFnPtr:
1048 case CK_ZeroToOCLOpaqueType:
1049 case CK_MatrixCast:
1050 case CK_HLSLVectorTruncation:
1051 case CK_HLSLMatrixTruncation:
1052 case CK_IntToOCLSampler:
1053 case CK_FloatingToFixedPoint:
1054 case CK_FixedPointToFloating:
1055 case CK_FixedPointCast:
1056 case CK_FixedPointToBoolean:
1057 case CK_FixedPointToIntegral:
1058 case CK_IntegralToFixedPoint:
1059 llvm_unreachable("cast kind invalid for aggregate types");
1060 }
1061}
1062
1063void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
1064 if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
1065 EmitAggLoadOfLValue(E);
1066 return;
1067 }
1068
1069 withReturnValueSlot(
1070 E, [&](ReturnValueSlot Slot) { return CGF.EmitCallExpr(E, Slot); });
1071}
1072
1073void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
1074 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
1075 return CGF.EmitObjCMessageExpr(E, Slot);
1076 });
1077}
1078
1079void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
1080 CGF.EmitIgnoredExpr(E->getLHS());
1081 Visit(E->getRHS());
1082}
1083
1084void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
1085 CodeGenFunction::StmtExprEvaluation eval(CGF);
1086 CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
1087}
1088
1094
// Emit a single scalar comparison (<, >, or ==) between LHS and RHS for the
// element type of E's operands, selecting the float/signed/unsigned predicate
// as appropriate. Member pointers are delegated to the C++ ABI.
// NOTE(review): original source line 1106 (the member-pointer comparison
// call) is missing from this extraction; verify against the upstream file.
1095 static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
1096 const BinaryOperator *E, llvm::Value *LHS,
1097 llvm::Value *RHS, CompareKind Kind,
1098 const char *NameSuffix = "") {
1099 QualType ArgTy = E->getLHS()->getType();
// For complex operands, compare component-wise on the element type.
1100 if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
1101 ArgTy = CT->getElementType();
1102
1103 if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
1104 assert(Kind == CK_Equal &&
1105 "member pointers may only be compared for equality");
1107 CGF, LHS, RHS, MPT, /*IsInequality*/ false);
1108 }
1109
1110 // Compute the comparison instructions for the specified comparison kind.
1111 struct CmpInstInfo {
1112 const char *Name;
1113 llvm::CmpInst::Predicate FCmp;
1114 llvm::CmpInst::Predicate SCmp;
1115 llvm::CmpInst::Predicate UCmp;
1116 };
// Immediately-invoked lambda: map the CompareKind onto the instruction
// name and the float/signed/unsigned predicates.
1117 CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
1118 using FI = llvm::FCmpInst;
1119 using II = llvm::ICmpInst;
1120 switch (Kind) {
1121 case CK_Less:
1122 return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
1123 case CK_Greater:
1124 return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
1125 case CK_Equal:
1126 return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
1127 }
1128 llvm_unreachable("Unrecognised CompareKind enum");
1129 }();
1130
1131 if (ArgTy->hasFloatingRepresentation())
1132 return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
1133 llvm::Twine(InstInfo.Name) + NameSuffix);
1134 if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
1135 auto Inst =
1136 ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
1137 return Builder.CreateICmp(Inst, LHS, RHS,
1138 llvm::Twine(InstInfo.Name) + NameSuffix);
1139 }
1140
1141 llvm_unreachable("unsupported aggregate binary expression should have "
1142 "already been handled");
1143}
1144
// Emit a three-way comparison (operator<=>) whose result is a comparison
// category object: compute the scalar comparisons, select the matching
// category constant, and store it into the category's single field.
// NOTE(review): original source line 1152 is missing from this extraction;
// verify against the upstream file before editing.
1145 void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
1146 using llvm::BasicBlock;
1147 using llvm::PHINode;
1148 using llvm::Value;
1149 assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
1150 E->getRHS()->getType()));
1151 const ComparisonCategoryInfo &CmpInfo =
1153 assert(CmpInfo.Record->isTriviallyCopyable() &&
1154 "cannot copy non-trivially copyable aggregate");
1155
1156 QualType ArgTy = E->getLHS()->getType();
1157
// Only scalar-ish operand types are supported here.
1158 if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
1159 !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
1160 !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
1161 return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
1162 }
1163 bool IsComplex = ArgTy->isAnyComplexType();
1164
1165 // Evaluate the operands to the expression and extract their values.
1166 auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
1167 RValue RV = CGF.EmitAnyExpr(E);
1168 if (RV.isScalar())
1169 return {RV.getScalarVal(), nullptr};
1170 if (RV.isAggregate())
1171 return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
1172 assert(RV.isComplex());
1173 return RV.getComplexVal();
1174 };
1175 auto LHSValues = EmitOperand(E->getLHS()),
1176 RHSValues = EmitOperand(E->getRHS());
1177
// For complex operands, equality requires both real and imaginary parts
// to compare equal.
1178 auto EmitCmp = [&](CompareKind K) {
1179 Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
1180 K, IsComplex ? ".r" : "");
1181 if (!IsComplex)
1182 return Cmp;
1183 assert(K == CompareKind::CK_Equal);
1184 Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
1185 RHSValues.second, K, ".i");
1186 return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
1187 };
1188 auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
1189 return Builder.getInt(VInfo->getIntValue());
1190 };
1191
1192 Value *Select;
1193 if (ArgTy->isNullPtrType()) {
1194 Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
1195 } else if (!CmpInfo.isPartial()) {
// Total ordering: nested selects over equal / less / greater.
1196 Value *SelectOne =
1197 Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
1198 EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
1199 Select = Builder.CreateSelect(EmitCmp(CK_Equal),
1200 EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1201 SelectOne, "sel.eq");
1202 } else {
// Partial ordering: anything that is neither <, ==, nor > is unordered.
1203 Value *SelectEq = Builder.CreateSelect(
1204 EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1205 EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
1206 Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
1207 EmitCmpRes(CmpInfo.getGreater()),
1208 SelectEq, "sel.gt");
1209 Select = Builder.CreateSelect(
1210 EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
1211 }
1212 // Create the return value in the destination slot.
1213 EnsureDest(E->getType());
1214 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1215
1216 // Emit the address of the first (and only) field in the comparison category
1217 // type, and initialize it from the constant integer value selected above.
1218 LValue FieldLV = CGF.EmitLValueForFieldInitialization(
1219 DestLV, *CmpInfo.Record->field_begin());
1220 CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);
1221
1222 // All done! The result is in the Dest slot.
1223}
1224
1225void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
1226 if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
1227 VisitPointerToDataMemberBinaryOperator(E);
1228 else
1229 CGF.ErrorUnsupported(E, "aggregate binary expression");
1230}
1231
1232void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
1233 const BinaryOperator *E) {
1234 LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
1235 EmitFinalDestCopy(E->getType(), LV);
1236}
1237
1238/// Is the value of the given expression possibly a reference to or
1239/// into a __block variable?
1240static bool isBlockVarRef(const Expr *E) {
1241 // Make sure we look through parens.
1242 E = E->IgnoreParens();
1243
1244 // Check for a direct reference to a __block variable.
1245 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
1246 const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
1247 return (var && var->hasAttr<BlocksAttr>());
1248 }
1249
1250 // More complicated stuff.
1251
1252 // Binary operators.
1253 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
1254 // For an assignment or pointer-to-member operation, just care
1255 // about the LHS.
1256 if (op->isAssignmentOp() || op->isPtrMemOp())
1257 return isBlockVarRef(op->getLHS());
1258
1259 // For a comma, just care about the RHS.
1260 if (op->getOpcode() == BO_Comma)
1261 return isBlockVarRef(op->getRHS());
1262
1263 // FIXME: pointer arithmetic?
1264 return false;
1265
1266 // Check both sides of a conditional operator.
1267 } else if (const AbstractConditionalOperator *op =
1268 dyn_cast<AbstractConditionalOperator>(E)) {
1269 return isBlockVarRef(op->getTrueExpr()) ||
1270 isBlockVarRef(op->getFalseExpr());
1271
1272 // OVEs are required to support BinaryConditionalOperators.
1273 } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(E)) {
1274 if (const Expr *src = op->getSourceExpr())
1275 return isBlockVarRef(src);
1276
1277 // Casts are necessary to get things like (*(int*)&var) = foo().
1278 // We don't really care about the kind of cast here, except
1279 // we don't want to look through l2r casts, because it's okay
1280 // to get the *value* in a __block variable.
1281 } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
1282 if (cast->getCastKind() == CK_LValueToRValue)
1283 return false;
1284 return isBlockVarRef(cast->getSubExpr());
1285
1286 // Handle unary operators. Again, just aggressively look through
1287 // it, ignoring the operation.
1288 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
1289 return isBlockVarRef(uop->getSubExpr());
1290
1291 // Look into the base of a field access.
1292 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
1293 return isBlockVarRef(mem->getBase());
1294
1295 // Look into the base of a subscript.
1296 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
1297 return isBlockVarRef(sub->getBase());
1298 }
1299
1300 return false;
1301}
1302
// Emit an aggregate assignment: evaluate the RHS into the LHS storage
// (handling __block-variable and atomic LHS specially) and then copy into
// the destination slot if the result is used.
// NOTE(review): original source lines 1326, 1332, 1334-1335, 1345, 1355 and
// 1366-1367 are missing from this extraction; verify against upstream.
1303 void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1304 ApplyAtomGroup Grp(CGF.getDebugInfo());
1305 // For an assignment to work, the value on the right has
1306 // to be compatible with the value on the left.
1307 assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
1308 E->getRHS()->getType()) &&
1309 "Invalid assignment");
1310
1311 // If the LHS might be a __block variable, and the RHS can
1312 // potentially cause a block copy, we need to evaluate the RHS first
1313 // so that the assignment goes the right place.
1314 // This is pretty semantically fragile.
1315 if (isBlockVarRef(E->getLHS()) &&
1316 E->getRHS()->HasSideEffects(CGF.getContext())) {
1317 // Ensure that we have a destination, and evaluate the RHS into that.
1318 EnsureDest(E->getRHS()->getType());
1319 Visit(E->getRHS());
1320
1321 // Now emit the LHS and copy into it.
1322 LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
1323
1324 // That copy is an atomic copy if the LHS is atomic.
1325 if (LHS.getType()->isAtomicType() ||
1327 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1328 return;
1329 }
1330
1331 EmitCopy(E->getLHS()->getType(),
1333 needsGC(E->getLHS()->getType()),
1336 Dest);
1337 return;
1338 }
1339
1340 LValue LHS = CGF.EmitLValue(E->getLHS());
1341
1342 // If we have an atomic type, evaluate into the destination and then
1343 // do an atomic copy.
1344 if (LHS.getType()->isAtomicType() ||
1346 EnsureDest(E->getRHS()->getType());
1347 Visit(E->getRHS());
1348 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1349 return;
1350 }
1351
1352 // Codegen the RHS so that it stores directly into the LHS.
1353 AggValueSlot LHSSlot = AggValueSlot::forLValue(
1354 LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
1356 // A non-volatile aggregate destination might have volatile member.
1357 if (!LHSSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType()))
1358 LHSSlot.setVolatile(true);
1359
1360 CGF.EmitAggExpr(E->getRHS(), LHSSlot);
1361
1362 // Copy into the destination if the assignment isn't ignored.
1363 EmitFinalDestCopy(E->getType(), LHS);
1364
1365 if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
1368 E->getType());
1369}
1370
// Emit a (binary) conditional operator of aggregate type by branching on the
// condition and emitting each arm into the same destination slot.
// NOTE(review): original source lines 1388, 1394, 1409 and 1414 are missing
// from this extraction; verify against the upstream file before editing.
1371 void AggExprEmitter::VisitAbstractConditionalOperator(
1372 const AbstractConditionalOperator *E) {
1373 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1374 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1375 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
1376
1377 // Bind the common expression if necessary.
1378 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
1379
1380 CodeGenFunction::ConditionalEvaluation eval(CGF);
1381 CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
1382 CGF.getProfileCount(E));
1383
1384 // Save whether the destination's lifetime is externally managed.
1385 bool isExternallyDestructed = Dest.isExternallyDestructed();
1386 bool destructNonTrivialCStruct =
1387 !isExternallyDestructed &&
1389 isExternallyDestructed |= destructNonTrivialCStruct;
1390 Dest.setExternallyDestructed(isExternallyDestructed);
1391
// Emit the true arm into the shared destination.
1392 eval.begin(CGF);
1393 CGF.EmitBlock(LHSBlock);
1395 Visit(E->getTrueExpr());
1396 eval.end(CGF);
1397
1398 assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
1399 CGF.Builder.CreateBr(ContBlock);
1400
1401 // If the result of an agg expression is unused, then the emission
1402 // of the LHS might need to create a destination slot. That's fine
1403 // with us, and we can safely emit the RHS into the same slot, but
1404 // we shouldn't claim that it's already being destructed.
1405 Dest.setExternallyDestructed(isExternallyDestructed);
1406
// Emit the false arm into the same destination.
1407 eval.begin(CGF);
1408 CGF.EmitBlock(RHSBlock);
1410 Visit(E->getFalseExpr());
1411 eval.end(CGF);
1412
1413 if (destructNonTrivialCStruct)
1415 E->getType());
1416
1417 CGF.EmitBlock(ContBlock);
1418}
1419
1420void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
1421 Visit(CE->getChosenSubExpr());
1422}
1423
1424void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1425 Address ArgValue = Address::invalid();
1426 CGF.EmitVAArg(VE, ArgValue, Dest);
1427
1428 // If EmitVAArg fails, emit an error.
1429 if (!ArgValue.isValid()) {
1430 CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
1431 return;
1432 }
1433}
1434
// Emit a C++ temporary binding: evaluate the subexpression into the slot
// and, unless the slot's lifetime is managed elsewhere, push the
// temporary's destructor cleanup afterwards.
// NOTE(review): original source line 1442 is missing from this extraction;
// verify against the upstream file before editing.
1435 void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1436 // Ensure that we have a slot, but if we already do, remember
1437 // whether it was externally destructed.
1438 bool wasExternallyDestructed = Dest.isExternallyDestructed();
1439 EnsureDest(E->getType());
1440
1441 // We're going to push a destructor if there isn't already one.
1443
1444 Visit(E->getSubExpr());
1445
1446 // Push that destructor we promised.
1447 if (!wasExternallyDestructed)
1448 CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1449}
1450
1451void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1452 AggValueSlot Slot = EnsureSlot(E->getType());
1453 CGF.EmitCXXConstructExpr(E, Slot);
1454}
1455
// Emit construction via an inherited (using-declared) constructor into the
// destination slot.
// NOTE(review): original source line 1459 (the emission call itself) is
// missing from this extraction; verify against the upstream file.
1456 void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1457 const CXXInheritedCtorInitExpr *E) {
1458 AggValueSlot Slot = EnsureSlot(E->getType());
1460 Slot.getAddress(),
1461 E->inheritedFromVBase(), E);
1462}
1463
// Emit a lambda expression by initializing each field of the closure class
// from the corresponding capture initializer, pushing destructor cleanups
// for captures that need them.
// NOTE(review): original source lines 1473 and 1490 are missing from this
// extraction; verify against the upstream file before editing.
1464 void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1465 AggValueSlot Slot = EnsureSlot(E->getType());
1466 LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1467
1468 // We'll need to enter cleanup scopes in case any of the element
1469 // initializers throws an exception or contains branch out of the expressions.
1470 CodeGenFunction::CleanupDeactivationScope scope(CGF);
1471
// Walk the closure's fields in parallel with the capture initializers.
1472 CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1474 e = E->capture_init_end();
1475 i != e; ++i, ++CurField) {
1476 // Emit initialization
1477 LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1478 if (CurField->hasCapturedVLAType()) {
1479 CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1480 continue;
1481 }
1482
1483 EmitInitializationToLValue(*i, LV);
1484
1485 // Push a destructor if necessary.
1486 if (QualType::DestructionKind DtorKind =
1487 CurField->getType().isDestructedType()) {
1488 assert(LV.isSimple());
1489 if (DtorKind)
1491 CurField->getType(),
1492 CGF.getDestroyer(DtorKind), false);
1493 }
1494 }
1495}
1496
1497void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1498 CodeGenFunction::RunCleanupsScope cleanups(CGF);
1499 Visit(E->getSubExpr());
1500}
1501
1502void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1503 QualType T = E->getType();
1504 AggValueSlot Slot = EnsureSlot(T);
1505 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1506}
1507
1508void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1509 QualType T = E->getType();
1510 AggValueSlot Slot = EnsureSlot(T);
1511 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1512}
1513
1514/// Determine whether the given cast kind is known to always convert values
1515/// with all zero bits in their value representation to values with all zero
1516/// bits in their value representation.
1517static bool castPreservesZero(const CastExpr *CE) {
1518 switch (CE->getCastKind()) {
1519 // No-ops.
1520 case CK_NoOp:
1521 case CK_UserDefinedConversion:
1522 case CK_ConstructorConversion:
1523 case CK_BitCast:
1524 case CK_ToUnion:
1525 case CK_ToVoid:
1526 // Conversions between (possibly-complex) integral, (possibly-complex)
1527 // floating-point, and bool.
1528 case CK_BooleanToSignedIntegral:
1529 case CK_FloatingCast:
1530 case CK_FloatingComplexCast:
1531 case CK_FloatingComplexToBoolean:
1532 case CK_FloatingComplexToIntegralComplex:
1533 case CK_FloatingComplexToReal:
1534 case CK_FloatingRealToComplex:
1535 case CK_FloatingToBoolean:
1536 case CK_FloatingToIntegral:
1537 case CK_IntegralCast:
1538 case CK_IntegralComplexCast:
1539 case CK_IntegralComplexToBoolean:
1540 case CK_IntegralComplexToFloatingComplex:
1541 case CK_IntegralComplexToReal:
1542 case CK_IntegralRealToComplex:
1543 case CK_IntegralToBoolean:
1544 case CK_IntegralToFloating:
1545 // Reinterpreting integers as pointers and vice versa.
1546 case CK_IntegralToPointer:
1547 case CK_PointerToIntegral:
1548 // Language extensions.
1549 case CK_VectorSplat:
1550 case CK_MatrixCast:
1551 case CK_NonAtomicToAtomic:
1552 case CK_AtomicToNonAtomic:
1553 case CK_HLSLVectorTruncation:
1554 case CK_HLSLMatrixTruncation:
1555 case CK_HLSLElementwiseCast:
1556 case CK_HLSLAggregateSplatCast:
1557 return true;
1558
1559 case CK_BaseToDerivedMemberPointer:
1560 case CK_DerivedToBaseMemberPointer:
1561 case CK_MemberPointerToBoolean:
1562 case CK_NullToMemberPointer:
1563 case CK_ReinterpretMemberPointer:
1564 // FIXME: ABI-dependent.
1565 return false;
1566
1567 case CK_AnyPointerToBlockPointerCast:
1568 case CK_BlockPointerToObjCPointerCast:
1569 case CK_CPointerToObjCPointerCast:
1570 case CK_ObjCObjectLValueCast:
1571 case CK_IntToOCLSampler:
1572 case CK_ZeroToOCLOpaqueType:
1573 // FIXME: Check these.
1574 return false;
1575
1576 case CK_FixedPointCast:
1577 case CK_FixedPointToBoolean:
1578 case CK_FixedPointToFloating:
1579 case CK_FixedPointToIntegral:
1580 case CK_FloatingToFixedPoint:
1581 case CK_IntegralToFixedPoint:
1582 // FIXME: Do all fixed-point types represent zero as all 0 bits?
1583 return false;
1584
1585 case CK_AddressSpaceConversion:
1586 case CK_BaseToDerived:
1587 case CK_DerivedToBase:
1588 case CK_Dynamic:
1589 case CK_NullToPointer:
1590 case CK_PointerToBoolean:
1591 // FIXME: Preserves zeroes only if zero pointers and null pointers have the
1592 // same representation in all involved address spaces.
1593 return false;
1594
1595 case CK_ARCConsumeObject:
1596 case CK_ARCExtendBlockObject:
1597 case CK_ARCProduceObject:
1598 case CK_ARCReclaimReturnedObject:
1599 case CK_CopyAndAutoreleaseBlockObject:
1600 case CK_ArrayToPointerDecay:
1601 case CK_FunctionToPointerDecay:
1602 case CK_BuiltinFnToFnPtr:
1603 case CK_Dependent:
1604 case CK_LValueBitCast:
1605 case CK_LValueToRValue:
1606 case CK_LValueToRValueBitCast:
1607 case CK_UncheckedDerivedToBase:
1608 case CK_HLSLArrayRValue:
1609 return false;
1610 }
1611 llvm_unreachable("Unhandled clang::CastKind enum");
1612}
1613
/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true. This can return false if uncertain, so it just
/// handles simple cases.
// NOTE(review): original source lines 1632-1633 and 1638 are missing from
// this extraction; verify against the upstream file before editing.
1617 static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1618 E = E->IgnoreParens();
// Look through any chain of casts known to preserve an all-zero value.
1619 while (auto *CE = dyn_cast<CastExpr>(E)) {
1620 if (!castPreservesZero(CE))
1621 break;
1622 E = CE->getSubExpr()->IgnoreParens();
1623 }
1624
1625 // 0
1626 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1627 return IL->getValue() == 0;
1628 // +0.0
1629 if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1630 return FL->getValue().isPosZero();
1631 // int()
1634 return true;
1635 // (int*)0 - Null pointer expressions.
1636 if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1637 return ICE->getCastKind() == CK_NullToPointer &&
1639 !E->HasSideEffects(CGF.getContext());
1640 // '\0'
1641 if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1642 return CL->getValue() == 0;
1643
1644 // Otherwise, hard case: conservatively return false.
1645 return false;
1646}
1647
// Initialize the given lvalue from expression E, short-circuiting the
// common cases (storing zero into already-zeroed memory, NoInitExpr,
// reference binding) before delegating to CodeGenFunction.
// NOTE(review): original source line 1655 is missing from this extraction;
// verify against the upstream file before editing.
1648 void AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1649 QualType type = LV.getType();
1650 // FIXME: Ignore result?
1651 // FIXME: Are initializers affected by volatile?
1652 if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1653 // Storing "i32 0" to a zero'd memory location is a noop.
1654 return;
1656 return EmitNullInitializationToLValue(LV);
1657 } else if (isa<NoInitExpr>(E)) {
1658 // Do nothing.
1659 return;
1660 } else if (type->isReferenceType()) {
1661 RValue RV = CGF.EmitReferenceBindingToExpr(E);
1662 return CGF.EmitStoreThroughLValue(RV, LV);
1663 }
1664
1665 CGF.EmitInitializationToLValue(E, LV, Dest.isZeroed());
1666}
1667
// Null-initialize the storage named by the given lvalue, skipping the work
// when the destination slot is already known to be zeroed.
// NOTE(review): original source line 1682 (the bitfield store) is missing
// from this extraction; verify against the upstream file before editing.
1668 void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1669 QualType type = lv.getType();
1670
1671 // If the destination slot is already zeroed out before the aggregate is
1672 // copied into it, we don't have to emit any zeros here.
1673 if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1674 return;
1675
1676 if (CGF.hasScalarEvaluationKind(type)) {
1677 // For non-aggregates, we can store the appropriate null constant.
1678 llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1679 // Note that the following is not equivalent to
1680 // EmitStoreThroughBitfieldLValue for ARC types.
1681 if (lv.isBitField()) {
1683 } else {
1684 assert(lv.isSimple());
1685 CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1686 }
1687 } else {
1688 // There's a potential optimization opportunity in combining
1689 // memsets; that would be easy for arrays, but relatively
1690 // difficult for structures with the current code.
1691 CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
1692 }
1693}
1694
// Emit a C++20 parenthesized aggregate initialization by forwarding to the
// shared paren-list/init-list emission path.
// NOTE(review): original source line 1697 (the middle argument) is missing
// from this extraction; verify against the upstream file before editing.
1695 void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
1696 VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
1698 E->getArrayFiller());
1699}
1700
1701void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1702 if (E->hadArrayRangeDesignator())
1703 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1704
1705 if (E->isTransparent())
1706 return Visit(E->getInit(0));
1707
1708 VisitCXXParenListOrInitListExpr(
1709 E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
1710}
1711
1712void AggExprEmitter::VisitCXXParenListOrInitListExpr(
1713 Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
1714 FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
1715#if 0
1716 // FIXME: Assess perf here? Figure out what cases are worth optimizing here
1717 // (Length of globals? Chunks of zeroed-out space?).
1718 //
1719 // If we can, prefer a copy from a global; this is a lot less code for long
1720 // globals, and it's easier for the current optimizers to analyze.
1721 if (llvm::Constant *C =
1722 CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
1723 llvm::GlobalVariable* GV =
1724 new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1725 llvm::GlobalValue::InternalLinkage, C, "");
1726 EmitFinalDestCopy(ExprToVisit->getType(),
1727 CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
1728 return;
1729 }
1730#endif
1731
1732 // HLSL initialization lists in the AST are an expansion which can contain
1733 // side-effecting expressions wrapped in opaque value expressions. To properly
1734 // emit these we need to emit the opaque values before we emit the argument
1735 // expressions themselves. This is a little hacky, but it prevents us needing
1736 // to do a bigger AST-level change for a language feature that we need
1737 // deprecate in the near future. See related HLSL language proposals:
1738 // * 0005-strict-initializer-lists.md
1739 // * https://github.com/microsoft/hlsl-specs/pull/325
1740 if (CGF.getLangOpts().HLSL && isa<InitListExpr>(ExprToVisit))
1742 CGF, cast<InitListExpr>(ExprToVisit));
1743
1744 AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());
1745
1746 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
1747
1748 // Handle initialization of an array.
1749 if (ExprToVisit->getType()->isConstantArrayType()) {
1750 auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1751 EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
1752 InitExprs, ArrayFiller);
1753 return;
1754 } else if (ExprToVisit->getType()->isVariableArrayType()) {
1755 // A variable array type that has an initializer can only do empty
1756 // initialization. And because this feature is not exposed as an extension
1757 // in C++, we can safely memset the array memory to zero.
1758 assert(InitExprs.size() == 0 &&
1759 "you can only use an empty initializer with VLAs");
1760 CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
1761 return;
1762 }
1763
1764 assert(ExprToVisit->getType()->isRecordType() &&
1765 "Only support structs/unions here!");
1766
1767 // Do struct initialization; this code just sets each individual member
1768 // to the approprate value. This makes bitfield support automatic;
1769 // the disadvantage is that the generated code is more difficult for
1770 // the optimizer, especially with bitfields.
1771 unsigned NumInitElements = InitExprs.size();
1772 RecordDecl *record = ExprToVisit->getType()->castAsRecordDecl();
1773
1774 // We'll need to enter cleanup scopes in case any of the element
1775 // initializers throws an exception.
1776 CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);
1777
1778 unsigned curInitIndex = 0;
1779
1780 // Emit initialization of base classes.
1781 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1782 assert(NumInitElements >= CXXRD->getNumBases() &&
1783 "missing initializer for base class");
1784 for (auto &Base : CXXRD->bases()) {
1785 assert(!Base.isVirtual() && "should not see vbases here");
1786 auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1788 Dest.getAddress(), CXXRD, BaseRD,
1789 /*isBaseVirtual*/ false);
1790 AggValueSlot AggSlot = AggValueSlot::forAddr(
1791 V, Qualifiers(), AggValueSlot::IsDestructed,
1793 CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1794 CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
1795
1796 if (QualType::DestructionKind dtorKind =
1797 Base.getType().isDestructedType())
1798 CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
1799 }
1800 }
1801
1802 // Prepare a 'this' for CXXDefaultInitExprs.
1803 CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1804
1805 const bool ZeroInitPadding =
1806 CGF.CGM.shouldZeroInitPadding() && !Dest.isZeroed();
1807
1808 if (record->isUnion()) {
1809 // Only initialize one field of a union. The field itself is
1810 // specified by the initializer list.
1811 if (!InitializedFieldInUnion) {
1812 // Empty union; we have nothing to do.
1813
1814#ifndef NDEBUG
1815 // Make sure that it's really an empty and not a failure of
1816 // semantic analysis.
1817 for (const auto *Field : record->fields())
1818 assert(
1819 (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
1820 "Only unnamed bitfields or anonymous class allowed");
1821#endif
1822 return;
1823 }
1824
1825 // FIXME: volatility
1826 FieldDecl *Field = InitializedFieldInUnion;
1827
1828 LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1829 if (NumInitElements) {
1830 // Store the initializer into the field
1831 EmitInitializationToLValue(InitExprs[0], FieldLoc);
1832 if (ZeroInitPadding) {
1833 uint64_t TotalSize = CGF.getContext().toBits(
1834 Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
1835 uint64_t FieldSize = CGF.getContext().getTypeSize(FieldLoc.getType());
1836 DoZeroInitPadding(FieldSize, TotalSize, nullptr);
1837 }
1838 } else {
1839 // Default-initialize to null.
1840 if (ZeroInitPadding)
1841 EmitNullInitializationToLValue(DestLV);
1842 else
1843 EmitNullInitializationToLValue(FieldLoc);
1844 }
1845 return;
1846 }
1847
1848 // Here we iterate over the fields; this makes it simpler to both
1849 // default-initialize fields and skip over unnamed fields.
1850 const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(record);
1851 uint64_t PaddingStart = 0;
1852
1853 for (const auto *field : record->fields()) {
1854 // We're done once we hit the flexible array member.
1855 if (field->getType()->isIncompleteArrayType())
1856 break;
1857
1858 // Always skip anonymous bitfields.
1859 if (field->isUnnamedBitField())
1860 continue;
1861
1862 // We're done if we reach the end of the explicit initializers, we
1863 // have a zeroed object, and the rest of the fields are
1864 // zero-initializable.
1865 if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1866 CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
1867 break;
1868
1869 if (ZeroInitPadding)
1870 DoZeroInitPadding(PaddingStart,
1871 Layout.getFieldOffset(field->getFieldIndex()), field);
1872
1873 LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1874 // We never generate write-barries for initialized fields.
1875 LV.setNonGC(true);
1876
1877 if (curInitIndex < NumInitElements) {
1878 // Store the initializer into the field.
1879 EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
1880 } else {
1881 // We're out of initializers; default-initialize to null
1882 EmitNullInitializationToLValue(LV);
1883 }
1884
1885 // Push a destructor if necessary.
1886 // FIXME: if we have an array of structures, all explicitly
1887 // initialized, we can end up pushing a linear number of cleanups.
1888 if (QualType::DestructionKind dtorKind =
1889 field->getType().isDestructedType()) {
1890 assert(LV.isSimple());
1891 if (dtorKind) {
1893 field->getType(),
1894 CGF.getDestroyer(dtorKind), false);
1895 }
1896 }
1897 }
1898 if (ZeroInitPadding) {
1899 uint64_t TotalSize = CGF.getContext().toBits(
1900 Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
1901 DoZeroInitPadding(PaddingStart, TotalSize, nullptr);
1902 }
1903}
1904
// Zero-initialize destination padding in the bit range
// [PaddingStart, PaddingEnd). On return PaddingStart has been advanced past
// the storage just covered, so callers can chain calls field-by-field while
// walking a record layout.
//
// NextField, when non-null, is the field whose storage begins at PaddingEnd.
// It is used to (a) special-case bit-fields, whose entire storage unit is
// zeroed before the bits are stored, and (b) advance PaddingStart past the
// field's type size for the next call.
void AggExprEmitter::DoZeroInitPadding(uint64_t &PaddingStart,
                                       uint64_t PaddingEnd,
                                       const FieldDecl *NextField) {

  // Memset the slot's bytes covering bit range [StartBit, EndBit), after
  // converting both bit offsets to char units.
  auto InitBytes = [&](uint64_t StartBit, uint64_t EndBit) {
    CharUnits Start = CGF.getContext().toCharUnitsFromBits(StartBit);
    CharUnits End = CGF.getContext().toCharUnitsFromBits(EndBit);
    Address Addr = Dest.getAddress().withElementType(CGF.CharTy);
    if (!Start.isZero())
      Addr = Builder.CreateConstGEP(Addr, Start.getQuantity());
    llvm::Constant *SizeVal = Builder.getInt64((End - Start).getQuantity());
    CGF.Builder.CreateMemSet(Addr, Builder.getInt8(0), SizeVal, false);
  };

  if (NextField != nullptr && NextField->isBitField()) {
    // For bitfield, zero init StorageSize before storing the bits. So we don't
    // need to handle big/little endian.
    const CGRecordLayout &RL =
        CGF.getTypes().getCGRecordLayout(NextField->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(NextField);
    uint64_t StorageStart = CGF.getContext().toBits(Info.StorageOffset);
    if (StorageStart + Info.StorageSize > PaddingStart) {
      // Zero any padding preceding the bit-field's storage unit, then store
      // a zero of the full storage width over the unit itself.
      if (StorageStart > PaddingStart)
        InitBytes(PaddingStart, StorageStart);
      Address Addr = Dest.getAddress();
      if (!Info.StorageOffset.isZero())
        Addr = Builder.CreateConstGEP(Addr.withElementType(CGF.CharTy),
                                      Info.StorageOffset.getQuantity());
      Addr = Addr.withElementType(
          llvm::Type::getIntNTy(CGF.getLLVMContext(), Info.StorageSize));
      Builder.CreateStore(Builder.getIntN(Info.StorageSize, 0), Addr);
      PaddingStart = StorageStart + Info.StorageSize;
    }
    return;
  }

  // Non-bit-field (or trailing) case: plain byte-wise zeroing of the gap.
  if (PaddingStart < PaddingEnd)
    InitBytes(PaddingStart, PaddingEnd);
  if (NextField != nullptr)
    PaddingStart =
        PaddingEnd + CGF.getContext().getTypeSize(NextField->getType());
}
1947
1948void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1949 llvm::Value *outerBegin) {
1950 // Emit the common subexpression.
1951 CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1952
1953 Address destPtr = EnsureSlot(E->getType()).getAddress();
1954 uint64_t numElements = E->getArraySize().getZExtValue();
1955
1956 if (!numElements)
1957 return;
1958
1959 // destPtr is an array*. Construct an elementType* by drilling down a level.
1960 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1961 llvm::Value *indices[] = {zero, zero};
1962 llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
1963 destPtr.emitRawPointer(CGF),
1964 indices, "arrayinit.begin");
1965
1966 // Prepare to special-case multidimensional array initialization: we avoid
1967 // emitting multiple destructor loops in that case.
1968 if (!outerBegin)
1969 outerBegin = begin;
1970 ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
1971
1972 QualType elementType =
1974 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1975 CharUnits elementAlign =
1976 destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1977 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
1978
1979 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1980 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1981
1982 // Jump into the body.
1983 CGF.EmitBlock(bodyBB);
1984 llvm::PHINode *index =
1985 Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1986 index->addIncoming(zero, entryBB);
1987 llvm::Value *element =
1988 Builder.CreateInBoundsGEP(llvmElementType, begin, index);
1989
1990 // Prepare for a cleanup.
1991 QualType::DestructionKind dtorKind = elementType.isDestructedType();
1992 EHScopeStack::stable_iterator cleanup;
1993 if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1994 if (outerBegin->getType() != element->getType())
1995 outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
1996 CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
1997 elementAlign,
1998 CGF.getDestroyer(dtorKind));
2000 } else {
2001 dtorKind = QualType::DK_none;
2002 }
2003
2004 // Emit the actual filler expression.
2005 {
2006 // Temporaries created in an array initialization loop are destroyed
2007 // at the end of each iteration.
2008 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
2009 CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
2010 LValue elementLV = CGF.MakeAddrLValue(
2011 Address(element, llvmElementType, elementAlign), elementType);
2012
2013 if (InnerLoop) {
2014 // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
2015 auto elementSlot = AggValueSlot::forLValue(
2016 elementLV, AggValueSlot::IsDestructed,
2019 AggExprEmitter(CGF, elementSlot, false)
2020 .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
2021 } else
2022 EmitInitializationToLValue(E->getSubExpr(), elementLV);
2023 }
2024
2025 // Move on to the next element.
2026 llvm::Value *nextIndex = Builder.CreateNUWAdd(
2027 index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
2028 index->addIncoming(nextIndex, Builder.GetInsertBlock());
2029
2030 // Leave the loop if we're done.
2031 llvm::Value *done = Builder.CreateICmpEQ(
2032 nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
2033 "arrayinit.done");
2034 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
2035 Builder.CreateCondBr(done, endBB, bodyBB);
2036
2037 CGF.EmitBlock(endBB);
2038
2039 // Leave the partial-array cleanup if we entered one.
2040 if (dtorKind)
2041 CGF.DeactivateCleanupBlock(cleanup, index);
2042}
2043
2044void AggExprEmitter::VisitDesignatedInitUpdateExpr(
2045 DesignatedInitUpdateExpr *E) {
2046 AggValueSlot Dest = EnsureSlot(E->getType());
2047
2048 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
2049 EmitInitializationToLValue(E->getBase(), DestLV);
2050 VisitInitListExpr(E->getUpdater());
2051}
2052
2053//===----------------------------------------------------------------------===//
2054// Entry Points into this File
2055//===----------------------------------------------------------------------===//
2056
2057/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
2058/// non-zero bytes that will be stored when outputting the initializer for the
2059/// specified initializer expression.
2061 if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
2062 E = MTE->getSubExpr();
2063 E = E->IgnoreParenNoopCasts(CGF.getContext());
2064
2065 // 0 and 0.0 won't require any non-zero stores!
2066 if (isSimpleZero(E, CGF))
2067 return CharUnits::Zero();
2068
2069 // If this is an initlist expr, sum up the size of sizes of the (present)
2070 // elements. If this is something weird, assume the whole thing is non-zero.
2071 const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
2072 while (ILE && ILE->isTransparent())
2073 ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
2074 if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
2075 return CGF.getContext().getTypeSizeInChars(E->getType());
2076
2077 // InitListExprs for structs have to be handled carefully. If there are
2078 // reference members, we need to consider the size of the reference, not the
2079 // referencee. InitListExprs for unions and arrays can't have references.
2080 if (const RecordType *RT = E->getType()->getAsCanonical<RecordType>()) {
2081 if (!RT->isUnionType()) {
2082 RecordDecl *SD = RT->getDecl()->getDefinitionOrSelf();
2083 CharUnits NumNonZeroBytes = CharUnits::Zero();
2084
2085 unsigned ILEElement = 0;
2086 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
2087 while (ILEElement != CXXRD->getNumBases())
2088 NumNonZeroBytes +=
2089 GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
2090 for (const auto *Field : SD->fields()) {
2091 // We're done once we hit the flexible array member or run out of
2092 // InitListExpr elements.
2093 if (Field->getType()->isIncompleteArrayType() ||
2094 ILEElement == ILE->getNumInits())
2095 break;
2096 if (Field->isUnnamedBitField())
2097 continue;
2098
2099 const Expr *E = ILE->getInit(ILEElement++);
2100
2101 // Reference values are always non-null and have the width of a pointer.
2102 if (Field->getType()->isReferenceType())
2103 NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
2105 else
2106 NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
2107 }
2108
2109 return NumNonZeroBytes;
2110 }
2111 }
2112
2113 // FIXME: This overestimates the number of non-zero bytes for bit-fields.
2114 CharUnits NumNonZeroBytes = CharUnits::Zero();
2115 for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
2116 NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
2117 return NumNonZeroBytes;
2118}
2119
2120/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
2121/// zeros in it, emit a memset and avoid storing the individual zeros.
2122///
2123static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
2124 CodeGenFunction &CGF) {
2125 // If the slot is already known to be zeroed, nothing to do. Don't mess with
2126 // volatile stores.
2127 if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
2128 return;
2129
2130 // C++ objects with a user-declared constructor don't need zero'ing.
2131 if (CGF.getLangOpts().CPlusPlus)
2132 if (const RecordType *RT = CGF.getContext()
2134 ->getAsCanonical<RecordType>()) {
2135 const auto *RD = cast<CXXRecordDecl>(RT->getDecl());
2136 if (RD->hasUserDeclaredConstructor())
2137 return;
2138 }
2139
2140 // If the type is 16-bytes or smaller, prefer individual stores over memset.
2141 CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
2142 if (Size <= CharUnits::fromQuantity(16))
2143 return;
2144
2145 // Check to see if over 3/4 of the initializer are known to be zero. If so,
2146 // we prefer to emit memset + individual stores for the rest.
2147 CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
2148 if (NumNonZeroBytes * 4 > Size)
2149 return;
2150
2151 // Okay, it seems like a good idea to use an initial memset, emit the call.
2152 llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
2153
2154 Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
2155 CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
2156
2157 // Tell the AggExprEmitter that the slot is known zero.
2158 Slot.setZeroed();
2159}
2160
2161/// EmitAggExpr - Emit the computation of the specified expression of aggregate
2162/// type. The result is computed into DestPtr. Note that if DestPtr is null,
2163/// the value of the aggregate expression is not needed. If VolatileDest is
2164/// true, DestPtr cannot be 0.
2166 assert(E && hasAggregateEvaluationKind(E->getType()) &&
2167 "Invalid aggregate expression to emit");
2168 assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
2169 "slot has bits but no address");
2170
2171 // Optimize the slot if possible.
2172 CheckAggExprForMemSetUse(Slot, E, *this);
2173
2174 AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
2175}
2176
2187
2189 const LValue &Src,
2190 ExprValueKind SrcKind) {
2191 return AggExprEmitter(*this, Dest, Dest.isIgnored())
2192 .EmitFinalDestCopy(Type, Src, SrcKind);
2193}
2194
2197 if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2199
2200 // Empty fields can overlap earlier fields.
2201 if (FD->getType()->getAsCXXRecordDecl()->isEmpty())
2203
2204 // If the field lies entirely within the enclosing class's nvsize, its tail
2205 // padding cannot overlap any already-initialized object. (The only subobjects
2206 // with greater addresses that might already be initialized are vbases.)
2207 const RecordDecl *ClassRD = FD->getParent();
2208 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
2209 if (Layout.getFieldOffset(FD->getFieldIndex()) +
2210 getContext().getTypeSize(FD->getType()) <=
2211 (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
2213
2214 // The tail padding may contain values we need to preserve.
2216}
2217
2219 const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
2220 // If the most-derived object is a field declared with [[no_unique_address]],
2221 // the tail padding of any virtual base could be reused for other subobjects
2222 // of that field's class.
2223 if (IsVirtual)
2225
2226 // Empty bases can overlap earlier bases.
2227 if (BaseRD->isEmpty())
2229
2230 // If the base class is laid out entirely within the nvsize of the derived
2231 // class, its tail padding cannot yet be initialized, so we can issue
2232 // stores at the full width of the base class.
2233 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2234 if (Layout.getBaseClassOffset(BaseRD) +
2235 getContext().getASTRecordLayout(BaseRD).getSize() <=
2236 Layout.getNonVirtualSize())
2238
2239 // The tail padding may contain values we need to preserve.
2241}
2242
2244 AggValueSlot::Overlap_t MayOverlap,
2245 bool isVolatile) {
2246 assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
2247
2248 Address DestPtr = Dest.getAddress();
2249 Address SrcPtr = Src.getAddress();
2250
2251 if (getLangOpts().CPlusPlus) {
2252 if (const auto *Record = Ty->getAsCXXRecordDecl()) {
2253 assert((Record->hasTrivialCopyConstructor() ||
2254 Record->hasTrivialCopyAssignment() ||
2255 Record->hasTrivialMoveConstructor() ||
2256 Record->hasTrivialMoveAssignment() ||
2257 Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
2258 "Trying to aggregate-copy a type without a trivial copy/move "
2259 "constructor or assignment operator");
2260 // Ignore empty classes in C++.
2261 if (Record->isEmpty())
2262 return;
2263 }
2264 }
2265
2266 if (getLangOpts().CUDAIsDevice) {
2268 if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
2269 Src))
2270 return;
2271 } else if (Ty->isCUDADeviceBuiltinTextureType()) {
2272 if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
2273 Src))
2274 return;
2275 }
2276 }
2277
2279 if (CGM.getHLSLRuntime().emitBufferCopy(*this, DestPtr, SrcPtr, Ty))
2280 return;
2281
2282 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
2283 // C99 6.5.16.1p3, which states "If the value being stored in an object is
2284 // read from another object that overlaps in anyway the storage of the first
2285 // object, then the overlap shall be exact and the two objects shall have
2286 // qualified or unqualified versions of a compatible type."
2287 //
2288 // memcpy is not defined if the source and destination pointers are exactly
2289 // equal, but other compilers do this optimization, and almost every memcpy
2290 // implementation handles this case safely. If there is a libc that does not
2291 // safely handle this, we can add a target hook.
2292
2293 // Get data size info for this aggregate. Don't copy the tail padding if this
2294 // might be a potentially-overlapping subobject, since the tail padding might
2295 // be occupied by a different object. Otherwise, copying it is fine.
2297 if (MayOverlap)
2298 TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
2299 else
2300 TypeInfo = getContext().getTypeInfoInChars(Ty);
2301
2302 llvm::Value *SizeVal = nullptr;
2303 if (TypeInfo.Width.isZero()) {
2304 // But note that getTypeInfo returns 0 for a VLA.
2305 if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
2306 getContext().getAsArrayType(Ty))) {
2307 QualType BaseEltTy;
2308 SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
2309 TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
2310 assert(!TypeInfo.Width.isZero());
2311 SizeVal = Builder.CreateNUWMul(
2312 SizeVal,
2313 llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
2314 }
2315 }
2316 if (!SizeVal) {
2317 SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
2318 }
2319
2320 // FIXME: If we have a volatile struct, the optimizer can remove what might
2321 // appear to be `extra' memory ops:
2322 //
2323 // volatile struct { int i; } a, b;
2324 //
2325 // int main() {
2326 // a = b;
2327 // a = b;
2328 // }
2329 //
2330 // we need to use a different call here. We use isVolatile to indicate when
2331 // either the source or the destination is volatile.
2332
2333 DestPtr = DestPtr.withElementType(Int8Ty);
2334 SrcPtr = SrcPtr.withElementType(Int8Ty);
2335
2336 // Don't do any of the memmove_collectable tests if GC isn't set.
2337 if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
2338 // fall through
2339 } else if (const auto *Record = Ty->getAsRecordDecl()) {
2340 if (Record->hasObjectMember()) {
2341 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2342 SizeVal);
2343 return;
2344 }
2345 } else if (Ty->isArrayType()) {
2346 QualType BaseType = getContext().getBaseElementType(Ty);
2347 if (const auto *Record = BaseType->getAsRecordDecl()) {
2348 if (Record->hasObjectMember()) {
2349 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2350 SizeVal);
2351 return;
2352 }
2353 }
2354 }
2355
2356 auto *Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2357 addInstToCurrentSourceAtom(Inst, nullptr);
2358 emitPFPPostCopyUpdates(DestPtr, SrcPtr, Ty);
2359
2360 // Determine the metadata to describe the position of any padding in this
2361 // memcpy, as well as the TBAA tags for the members of the struct, in case
2362 // the optimizer wishes to expand it in to scalar memory operations.
2363 if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2364 Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2365
2366 if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2367 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
2368 Dest.getTBAAInfo(), Src.getTBAAInfo());
2369 CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2370 }
2371}
Defines the clang::ASTContext interface.
#define V(N, I)
CompareKind
@ CK_Greater
@ CK_Less
@ CK_Equal
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF)
GetNumNonZeroBytesInInit - Get an approximate count of the number of non-zero bytes that will be stor...
static Expr * findPeephole(Expr *op, CastKind kind, const ASTContext &ctx)
Attempt to look through various unimportant expressions to find a cast of the given kind.
static bool isBlockVarRef(const Expr *E)
Is the value of the given expression possibly a reference to or into a __block variable?
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF)
isSimpleZero - If emitting this value will obviously just cause a store of zero to memory,...
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal, LValue SrcVal, SourceLocation Loc)
static bool castPreservesZero(const CastExpr *CE)
Determine whether the given cast kind is known to always convert values with all zero bits in their v...
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, CodeGenFunction &CGF)
CheckAggExprForMemSetUse - If the initializer is large and has a lot of zeros in it,...
static void EmitHLSLScalarElementwiseAndSplatCasts(CodeGenFunction &CGF, LValue DestVal, llvm::Value *SrcVal, QualType SrcTy, SourceLocation Loc)
static bool isTrivialFiller(Expr *e)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the C++ template declaration subclasses.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
llvm::MachO::Record Record
Definition MachO.h:31
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
static bool isVector(QualType QT, QualType ElementType)
This helper function returns true if QT is a vector type that has element type ElementType.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
ComparisonCategories CompCategories
Types and expressions required to build C++2a three-way comparisons using operator<=>,...
QualType removeAddrSpaceQualType(QualType T) const
Remove any existing address space on the type and returns the type with qualifiers intact (or that's ...
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
static bool hasSameType(QualType T1, QualType T2)
Determine whether the given types T1 and T2 are equivalent.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
llvm::APInt getArraySize() const
Definition Expr.h:5993
OpaqueValueExpr * getCommonExpr() const
Get the common subexpression shared by all initializations (the source array).
Definition Expr.h:5986
Expr * getSubExpr() const
Get the initializer to use for each array element.
Definition Expr.h:5991
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
QualType getElementType() const
Definition TypeBase.h:3742
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
Expr * getRHS() const
Definition Expr.h:4093
Opcode getOpcode() const
Definition Expr.h:4086
CXXTemporary * getTemporary()
Definition ExprCXX.h:1512
const Expr * getSubExpr() const
Definition ExprCXX.h:1516
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool constructsVBase() const
Determine whether this constructor is actually constructing a base class (rather than a complete obje...
Definition ExprCXX.h:1793
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will call.
Definition ExprCXX.h:1789
bool inheritedFromVBase() const
Determine whether the inherited constructor is inherited from a virtual base of the object we constru...
Definition ExprCXX.h:1803
MutableArrayRef< Expr * > getInitExprs()
Definition ExprCXX.h:5182
FieldDecl * getInitializedFieldInUnion()
Definition ExprCXX.h:5220
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool isTriviallyCopyable() const
Determine whether this class is considered trivially copyable per (C++11 [class]p6).
Definition DeclCXX.cpp:610
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:305
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4887
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * getBasePointer() const
Definition Address.h:198
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition Address.h:261
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition Address.h:215
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
Definition Address.h:233
bool isValid() const
Definition Address.h:177
An aggregate value slot.
Definition CGValue.h:551
void setVolatile(bool flag)
Definition CGValue.h:670
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition CGValue.h:619
Address getAddress() const
Definition CGValue.h:691
CharUnits getPreferredSize(ASTContext &Ctx, QualType Type) const
Get the preferred size to use when storing a value to this slot.
Definition CGValue.h:729
NeedsGCBarriers_t requiresGCollection() const
Definition CGValue.h:681
void setExternallyDestructed(bool destructed=true)
Definition CGValue.h:660
void setZeroed(bool V=true)
Definition CGValue.h:721
IsZeroed_t isZeroed() const
Definition CGValue.h:722
Qualifiers getQualifiers() const
Definition CGValue.h:664
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition CGValue.h:649
IsAliased_t isPotentiallyAliased() const
Definition CGValue.h:701
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:634
IsDestructed_t isExternallyDestructed() const
Definition CGValue.h:657
Overlap_t mayOverlap() const
Definition CGValue.h:705
RValue asRValue() const
Definition CGValue.h:713
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Definition CGValue.h:687
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:408
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition CGBuilder.h:229
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:84
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr, llvm::Value *Size)=0
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:573
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
void callCStructMoveConstructor(LValue Dst, LValue Src)
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null, If the type contains...
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
pushRegularPartialArrayCleanup - Push an EH cleanup to destroy already-constructed elements of the gi...
Definition CGDecl.cpp:2616
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:2975
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6444
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7254
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual)
Determine whether a base class initialization may overlap some other object.
const LangOptions & getLangOpts() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:692
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
Definition CGExpr.cpp:7082
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
Definition CGDecl.cpp:2300
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to destroy already-constructed elements ...
Definition CGDecl.cpp:2600
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
Definition CGDecl.cpp:2273
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
Definition CGExpr.cpp:7259
void CreateCoercedStore(llvm::Value *Src, QualType SrcFETy, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
Definition CGCall.cpp:1495
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetInfo & getTarget() const
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:251
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6367
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2472
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
Definition CGDecl.cpp:2325
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
void callCStructCopyAssignmentOperator(LValue Dst, LValue Src)
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
void callCStructCopyConstructor(LValue Dst, LValue Src)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5858
Address GetAddressOfDirectBaseInCompleteClass(Address Value, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual)
GetAddressOfBaseOfCompleteClass - Convert the given pointer to a complete class to the given direct b...
Definition CGClass.cpp:214
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition CGExpr.cpp:153
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6306
const TargetCodeGenInfo & getTargetHooks() const
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1368
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen without...
Definition CGExpr.cpp:224
void callCStructMoveAssignmentOperator(LValue Dst, LValue Src)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2713
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2353
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:557
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:273
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:912
void emitPFPPostCopyUpdates(Address DestPtr, Address SrcPtr, QualType Ty)
Copy all PFP fields from SrcPtr to DestPtr while updating signatures, assuming that DestPtr was alrea...
CodeGenTypes & getTypes() const
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7263
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
Emits all the code to cause the given temporary to be cleaned up.
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1672
void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E)
Emit a call to a constructor inherited from a base class, passing the current constructor's arguments...
Definition CGClass.cpp:2429
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:189
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Definition CGExpr.cpp:332
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1707
llvm::LLVMContext & getLLVMContext()
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:640
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1387
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::Module & getModule() const
bool isPaddedAtomicType(QualType type)
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
ASTContext & getContext() const
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
bool isPointerZeroInitializable(QualType T)
Check if the pointer type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM z...
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into a unstable pointer to the EH stack.
Definition CGCleanup.h:647
LValue - This represents an lvalue references.
Definition CGValue.h:183
Address getAddress() const
Definition CGValue.h:373
TBAAAccessInfo getTBAAInfo() const
Definition CGValue.h:347
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
llvm::Value * getAggregatePointer(QualType PointeeType, CodeGenFunction &CGF) const
Definition CGValue.h:89
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
bool isComplex() const
Definition CGValue.h:65
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition CGValue.h:79
const ComparisonCategoryInfo & getInfoForType(QualType Ty) const
Return the comparison category information as specified by getCategoryForType(Ty).
bool isPartial() const
True iff the comparison is not totally ordered.
const ValueInfo * getLess() const
const ValueInfo * getUnordered() const
const CXXRecordDecl * Record
The declaration for the comparison category type from the standard library.
const ValueInfo * getGreater() const
const ValueInfo * getEqualOrEquiv() const
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3283
const Expr * getInitializer() const
Definition Expr.h:3636
llvm::APInt getSize() const
Return the constant array size as an APInt.
Definition TypeBase.h:3824
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool hasAttr() const
Definition DeclBase.h:577
InitListExpr * getUpdater() const
Definition Expr.h:5939
This represents one expression.
Definition Expr.h:112
bool isGLValue() const
Definition Expr.h:287
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3117
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3670
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3263
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
const Expr * getSubExpr() const
Definition Expr.h:1065
Describes an C or C++ initializer list.
Definition Expr.h:5302
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2462
FieldDecl * getInitializedFieldInUnion()
If this initializes a union, specifies which field in the union to initialize.
Definition Expr.h:5428
unsigned getNumInits() const
Definition Expr.h:5332
bool hadArrayRangeDesignator() const
Definition Expr.h:5486
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5404
const Expr * getInit(unsigned Init) const
Definition Expr.h:5356
ArrayRef< Expr * > inits()
Definition Expr.h:5352
capture_init_iterator capture_init_end()
Retrieve the iterator pointing one past the last initialization argument for this lambda expression.
Definition ExprCXX.h:2107
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition ExprCXX.h:2081
capture_init_iterator capture_init_begin()
Retrieve the first initialization argument for this lambda expression (which initializes the first ca...
Definition ExprCXX.h:2095
CXXRecordDecl * getLambdaClass() const
Retrieve the class that corresponds to the lambda.
Definition ExprCXX.cpp:1400
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4938
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3661
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
bool isUnique() const
Definition Expr.h:1239
Expr * getSelectedExpr() const
Definition ExprCXX.h:4640
const Expr * getSubExpr() const
Definition Expr.h:2202
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8472
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2911
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8514
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1551
@ PCK_Struct
The type is a struct containing a field whose type is neither PCK_Trivial nor PCK_VolatileTrivial.
Definition TypeBase.h:1523
Represents a struct/union/class.
Definition Decl.h:4327
bool hasObjectMember() const
Definition Decl.h:4387
field_range fields() const
Definition Decl.h:4530
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4527
RecordDecl * getDefinitionOrSelf() const
Definition Decl.h:4515
field_iterator field_begin() const
Definition Decl.cpp:5276
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4615
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
bool isUnion() const
Definition Decl.h:3928
uint64_t getPointerWidth(LangAS AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition TargetInfo.h:489
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8728
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isArrayType() const
Definition TypeBase.h:8724
bool isPointerType() const
Definition TypeBase.h:8625
bool isReferenceType() const
Definition TypeBase.h:8649
bool isScalarType() const
Definition TypeBase.h:9097
bool isVariableArrayType() const
Definition TypeBase.h:8736
bool isCUDADeviceBuiltinSurfaceType() const
Check if the type is the CUDA device builtin surface type.
Definition Type.cpp:5407
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition TypeBase.h:9113
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8760
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2274
bool isMemberPointerType() const
Definition TypeBase.h:8706
bool isCUDADeviceBuiltinTextureType() const
Check if the type is the CUDA device builtin texture type.
Definition Type.cpp:5416
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
Definition Type.cpp:2349
bool isVectorType() const
Definition TypeBase.h:8764
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2357
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
Definition TypeBase.h:2929
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9218
bool isNullPtrType() const
Definition TypeBase.h:9028
bool isRecordType() const
Definition TypeBase.h:8752
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
Expr * getSubExpr() const
Definition Expr.h:2288
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
Represents a GCC generic vector type.
Definition TypeBase.h:4183
Definition SPIR.cpp:35
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< AtomicType > atomicType
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1341
llvm::json::Array Array
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ Result
The result type of a method or function.
Definition TypeBase.h:905
LangAS
Defines the address space values used by the address space qualifier of QualType.
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * CharTy
char