clang 23.0.0git
CGExprAgg.cpp
Go to the documentation of this file.
1//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGDebugInfo.h"
15#include "CGHLSLRuntime.h"
16#include "CGObjCRuntime.h"
17#include "CGRecordLayout.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "EHScopeStack.h"
22#include "TargetInfo.h"
24#include "clang/AST/Attr.h"
25#include "clang/AST/DeclCXX.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/GlobalVariable.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/IntrinsicInst.h"
33#include "llvm/IR/Intrinsics.h"
34using namespace clang;
35using namespace CodeGen;
36
37//===----------------------------------------------------------------------===//
38// Aggregate Expression Emitter
39//===----------------------------------------------------------------------===//
40
41namespace {
42class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
43 CodeGenFunction &CGF;
44 CGBuilderTy &Builder;
45 AggValueSlot Dest;
46 bool IsResultUnused;
47
48 AggValueSlot EnsureSlot(QualType T) {
49 if (!Dest.isIgnored())
50 return Dest;
51 return CGF.CreateAggTemp(T, "agg.tmp.ensured");
52 }
53 void EnsureDest(QualType T) {
54 if (!Dest.isIgnored())
55 return;
56 Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
57 }
58
59 // Calls `Fn` with a valid return value slot, potentially creating a temporary
60 // to do so. If a temporary is created, an appropriate copy into `Dest` will
61 // be emitted, as will lifetime markers.
62 //
63 // The given function should take a ReturnValueSlot, and return an RValue that
64 // points to said slot.
65 void withReturnValueSlot(const Expr *E,
66 llvm::function_ref<RValue(ReturnValueSlot)> Fn);
67
68 void DoZeroInitPadding(uint64_t &PaddingStart, uint64_t PaddingEnd,
69 const FieldDecl *NextField);
70
71public:
72 AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
73 : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
74 IsResultUnused(IsResultUnused) {}
75
76 //===--------------------------------------------------------------------===//
77 // Utilities
78 //===--------------------------------------------------------------------===//
79
80 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
81 /// represents a value lvalue, this method emits the address of the lvalue,
82 /// then loads the result into DestPtr.
83 void EmitAggLoadOfLValue(const Expr *E);
84
85 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
86 /// SrcIsRValue is true if source comes from an RValue.
87 void EmitFinalDestCopy(QualType type, const LValue &src,
90 void EmitFinalDestCopy(QualType type, RValue src);
91 void EmitCopy(QualType type, const AggValueSlot &dest,
92 const AggValueSlot &src);
93
94 void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
95 Expr *ExprToVisit, ArrayRef<Expr *> Args,
96 Expr *ArrayFiller);
97
98 AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
99 if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
102 }
103
104 bool TypeRequiresGCollection(QualType T);
105
106 //===--------------------------------------------------------------------===//
107 // Visitor Methods
108 //===--------------------------------------------------------------------===//
109
110 void Visit(Expr *E) {
111 ApplyDebugLocation DL(CGF, E);
112 StmtVisitor<AggExprEmitter>::Visit(E);
113 }
114
115 void VisitStmt(Stmt *S) { CGF.ErrorUnsupported(S, "aggregate expression"); }
116 void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
117 void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
118 Visit(GE->getResultExpr());
119 }
120 void VisitCoawaitExpr(CoawaitExpr *E) {
121 CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
122 }
123 void VisitCoyieldExpr(CoyieldExpr *E) {
124 CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
125 }
126 void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
127 void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
128 void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
129 return Visit(E->getReplacement());
130 }
131
132 void VisitConstantExpr(ConstantExpr *E) {
133 EnsureDest(E->getType());
134
135 if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
137 Result, Dest.getAddress(),
138 llvm::TypeSize::getFixed(
139 Dest.getPreferredSize(CGF.getContext(), E->getType())
140 .getQuantity()),
142 return;
143 }
144 return Visit(E->getSubExpr());
145 }
146
147 // l-values.
148 void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
149 void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
150 void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
151 void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
152 void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
153 void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
154 EmitAggLoadOfLValue(E);
155 }
156 void VisitPredefinedExpr(const PredefinedExpr *E) { EmitAggLoadOfLValue(E); }
157
158 // Operators.
159 void VisitCastExpr(CastExpr *E);
160 void VisitCallExpr(const CallExpr *E);
161 void VisitStmtExpr(const StmtExpr *E);
162 void VisitBinaryOperator(const BinaryOperator *BO);
163 void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
164 void VisitBinAssign(const BinaryOperator *E);
165 void VisitBinComma(const BinaryOperator *E);
166 void VisitBinCmp(const BinaryOperator *E);
167 void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
168 Visit(E->getSemanticForm());
169 }
170
171 void VisitObjCMessageExpr(ObjCMessageExpr *E);
172 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { EmitAggLoadOfLValue(E); }
173
174 void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
175 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
176 void VisitChooseExpr(const ChooseExpr *CE);
177 void VisitInitListExpr(InitListExpr *E);
178 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
179 FieldDecl *InitializedFieldInUnion,
180 Expr *ArrayFiller);
181 void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
182 llvm::Value *outerBegin = nullptr);
183 void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
184 void VisitNoInitExpr(NoInitExpr *E) {} // Do nothing.
185 void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
186 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
187 Visit(DAE->getExpr());
188 }
189 void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
190 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
191 Visit(DIE->getExpr());
192 }
193 void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
194 void VisitCXXConstructExpr(const CXXConstructExpr *E);
195 void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
196 void VisitLambdaExpr(LambdaExpr *E);
197 void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
198 void VisitExprWithCleanups(ExprWithCleanups *E);
199 void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
200 void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
201 void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
202 void VisitOpaqueValueExpr(OpaqueValueExpr *E);
203
204 void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
205 if (E->isGLValue()) {
206 LValue LV = CGF.EmitPseudoObjectLValue(E);
207 return EmitFinalDestCopy(E->getType(), LV);
208 }
209
210 AggValueSlot Slot = EnsureSlot(E->getType());
211 bool NeedsDestruction =
212 !Slot.isExternallyDestructed() &&
214 if (NeedsDestruction)
216 CGF.EmitPseudoObjectRValue(E, Slot);
217 if (NeedsDestruction)
219 E->getType());
220 }
221
222 void VisitVAArgExpr(VAArgExpr *E);
223 void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
224 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
225 Expr *ArrayFiller);
226
227 void EmitInitializationToLValue(Expr *E, LValue Address);
228 void EmitNullInitializationToLValue(LValue Address);
229 // case Expr::ChooseExprClass:
230 void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
231 void VisitAtomicExpr(AtomicExpr *E) {
232 RValue Res = CGF.EmitAtomicExpr(E);
233 EmitFinalDestCopy(E->getType(), Res);
234 }
235 void VisitPackIndexingExpr(PackIndexingExpr *E) {
236 Visit(E->getSelectedExpr());
237 }
238};
239} // end anonymous namespace.
240
241//===----------------------------------------------------------------------===//
242// Utilities
243//===----------------------------------------------------------------------===//
244
245/// EmitAggLoadOfLValue - Given an expression with aggregate type that
246/// represents a value lvalue, this method emits the address of the lvalue,
247/// then loads the result into DestPtr.
248void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
249 LValue LV = CGF.EmitLValue(E);
250
251 // If the type of the l-value is atomic, then do an atomic load.
252 if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
253 CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
254 return;
255 }
256
257 EmitFinalDestCopy(E->getType(), LV);
258}
259
260/// True if the given aggregate type requires special GC API calls.
261bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
262 // Only record types have members that might require garbage collection.
263 const auto *Record = T->getAsRecordDecl();
264 if (!Record)
265 return false;
266
267 // Don't mess with non-trivial C++ types.
269 (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
270 !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
271 return false;
272
273 // Check whether the type has an object member.
274 return Record->hasObjectMember();
275}
276
277void AggExprEmitter::withReturnValueSlot(
278 const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
279 QualType RetTy = E->getType();
280 bool RequiresDestruction =
281 !Dest.isExternallyDestructed() &&
283
284 // If it makes no observable difference, save a memcpy + temporary.
285 //
286 // We need to always provide our own temporary if destruction is required.
287 // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
288 // its lifetime before we have the chance to emit a proper destructor call.
289 bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
290 (RequiresDestruction && Dest.isIgnored());
291
292 Address RetAddr = Address::invalid();
293
294 EHScopeStack::stable_iterator LifetimeEndBlock;
295 llvm::IntrinsicInst *LifetimeStartInst = nullptr;
296 if (!UseTemp) {
297 RetAddr = Dest.getAddress();
298 } else {
299 RetAddr = CGF.CreateMemTempWithoutCast(RetTy, "tmp");
300 if (CGF.EmitLifetimeStart(RetAddr.getBasePointer())) {
301 LifetimeStartInst =
302 cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
303 assert(LifetimeStartInst->getIntrinsicID() ==
304 llvm::Intrinsic::lifetime_start &&
305 "Last insertion wasn't a lifetime.start?");
306
307 CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
308 NormalEHLifetimeMarker, RetAddr);
309 LifetimeEndBlock = CGF.EHStack.stable_begin();
310 }
311 }
312
313 RValue Src =
314 EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
315 Dest.isExternallyDestructed()));
316
317 if (!UseTemp)
318 return;
319
320 assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
321 Src.getAggregatePointer(E->getType(), CGF));
322 EmitFinalDestCopy(E->getType(), Src);
323
324 if (!RequiresDestruction && LifetimeStartInst) {
325 // If there's no dtor to run, the copy was the last use of our temporary.
326 // Since we're not guaranteed to be in an ExprWithCleanups, clean up
327 // eagerly.
328 CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
329 CGF.EmitLifetimeEnd(RetAddr.getBasePointer());
330 }
331}
332
333/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
334void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
335 assert(src.isAggregate() && "value must be aggregate value!");
336 LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
337 EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
338}
339
340/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
341void AggExprEmitter::EmitFinalDestCopy(
342 QualType type, const LValue &src,
343 CodeGenFunction::ExprValueKind SrcValueKind) {
344 // If Dest is ignored, then we're evaluating an aggregate expression
345 // in a context that doesn't care about the result. Note that loads
346 // from volatile l-values force the existence of a non-ignored
347 // destination.
348 if (Dest.isIgnored())
349 return;
350
351 // Copy non-trivial C structs here.
352 LValue DstLV = CGF.MakeAddrLValue(
353 Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
354
355 if (SrcValueKind == CodeGenFunction::EVK_RValue) {
356 if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
357 if (Dest.isPotentiallyAliased())
358 CGF.callCStructMoveAssignmentOperator(DstLV, src);
359 else
360 CGF.callCStructMoveConstructor(DstLV, src);
361 return;
362 }
363 } else {
364 if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
365 if (Dest.isPotentiallyAliased())
366 CGF.callCStructCopyAssignmentOperator(DstLV, src);
367 else
368 CGF.callCStructCopyConstructor(DstLV, src);
369 return;
370 }
371 }
372
373 AggValueSlot srcAgg = AggValueSlot::forLValue(
376 EmitCopy(type, Dest, srcAgg);
377}
378
379/// Perform a copy from the source into the destination.
380///
381/// \param type - the type of the aggregate being copied; qualifiers are
382/// ignored
383void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
384 const AggValueSlot &src) {
385 if (dest.requiresGCollection()) {
386 CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
387 llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
389 src.getAddress(), size);
390 return;
391 }
392
393 // If the result of the assignment is used, copy the LHS there also.
394 // It's volatile if either side is. Use the minimum alignment of
395 // the two sides.
396 LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
397 LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
398 CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
399 dest.isVolatile() || src.isVolatile());
400}
401
402/// Emit the initializer for a std::initializer_list initialized with a
403/// real initializer list.
404void AggExprEmitter::VisitCXXStdInitializerListExpr(
405 CXXStdInitializerListExpr *E) {
406 // Emit an array containing the elements. The array is externally destructed
407 // if the std::initializer_list object is.
408 ASTContext &Ctx = CGF.getContext();
409 LValue Array = CGF.EmitLValue(E->getSubExpr());
410 assert(Array.isSimple() && "initializer_list array not a simple lvalue");
411 Address ArrayPtr = Array.getAddress();
412
413 const ConstantArrayType *ArrayType =
415 assert(ArrayType && "std::initializer_list constructed from non-array");
416
417 auto *Record = E->getType()->castAsRecordDecl();
418 RecordDecl::field_iterator Field = Record->field_begin();
419 assert(Field != Record->field_end() &&
420 Ctx.hasSameType(Field->getType()->getPointeeType(),
421 ArrayType->getElementType()) &&
422 "Expected std::initializer_list first field to be const E *");
423
424 // Start pointer.
425 AggValueSlot Dest = EnsureSlot(E->getType());
426 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
427 LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
428 llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
429 CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
430 ++Field;
431 assert(Field != Record->field_end() &&
432 "Expected std::initializer_list to have two fields");
433
434 llvm::Value *Size = Builder.getInt(ArrayType->getSize());
435 LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
436 if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
437 // Length.
438 CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
439
440 } else {
441 // End pointer.
442 assert(Field->getType()->isPointerType() &&
443 Ctx.hasSameType(Field->getType()->getPointeeType(),
444 ArrayType->getElementType()) &&
445 "Expected std::initializer_list second field to be const E *");
446 llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
447 llvm::Value *IdxEnd[] = {Zero, Size};
448 llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
449 ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
450 "arrayend");
451 CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
452 }
453
454 assert(++Field == Record->field_end() &&
455 "Expected std::initializer_list to only have two fields");
456}
457
458/// Determine if E is a trivial array filler, that is, one that is
459/// equivalent to zero-initialization.
460static bool isTrivialFiller(Expr *E) {
461 if (!E)
462 return true;
463
465 return true;
466
467 if (auto *ILE = dyn_cast<InitListExpr>(E)) {
468 if (ILE->getNumInits())
469 return false;
470 return isTrivialFiller(ILE->getArrayFiller());
471 }
472
473 if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
474 return Cons->getConstructor()->isDefaultConstructor() &&
475 Cons->getConstructor()->isTrivial();
476
477 // FIXME: Are there other cases where we can avoid emitting an initializer?
478 return false;
479}
480
481// emit an elementwise cast where the RHS is a scalar or vector
482// or emit an aggregate splat cast
484 LValue DestVal,
485 llvm::Value *SrcVal,
486 QualType SrcTy,
487 SourceLocation Loc) {
488 // Flatten our destination
489 SmallVector<LValue, 16> StoreList;
490 CGF.FlattenAccessAndTypeLValue(DestVal, StoreList);
491
492 bool isVector = false;
493 if (auto *VT = SrcTy->getAs<VectorType>()) {
494 isVector = true;
495 SrcTy = VT->getElementType();
496 assert(StoreList.size() <= VT->getNumElements() &&
497 "Cannot perform HLSL flat cast when vector source \
498 object has less elements than flattened destination \
499 object.");
500 }
501
502 for (unsigned I = 0, Size = StoreList.size(); I < Size; I++) {
503 LValue DestLVal = StoreList[I];
504 llvm::Value *Load =
505 isVector ? CGF.Builder.CreateExtractElement(SrcVal, I, "vec.load")
506 : SrcVal;
507 llvm::Value *Cast =
508 CGF.EmitScalarConversion(Load, SrcTy, DestLVal.getType(), Loc);
509 CGF.EmitStoreThroughLValue(RValue::get(Cast), DestLVal);
510 }
511}
512
513// emit a flat cast where the RHS is an aggregate
514static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal,
515 LValue SrcVal, SourceLocation Loc) {
516 // Flatten our destination
517 SmallVector<LValue, 16> StoreList;
518 CGF.FlattenAccessAndTypeLValue(DestVal, StoreList);
519 // Flatten our src
521 CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
522
523 assert(StoreList.size() <= LoadList.size() &&
524 "Cannot perform HLSL elementwise cast when flattened source object \
525 has less elements than flattened destination object.");
526 // apply casts to what we load from LoadList
527 // and store result in Dest
528 for (unsigned I = 0, E = StoreList.size(); I < E; I++) {
529 LValue DestLVal = StoreList[I];
530 LValue SrcLVal = LoadList[I];
531 RValue RVal = CGF.EmitLoadOfLValue(SrcLVal, Loc);
532 assert(RVal.isScalar() && "All flattened source values should be scalars");
533 llvm::Value *Val = RVal.getScalarVal();
534 llvm::Value *Cast = CGF.EmitScalarConversion(Val, SrcLVal.getType(),
535 DestLVal.getType(), Loc);
536 CGF.EmitStoreThroughLValue(RValue::get(Cast), DestLVal);
537 }
538}
539
540/// Emit initialization of an array from an initializer list. ExprToVisit must
541/// be either an InitListEpxr a CXXParenInitListExpr.
542void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
543 QualType ArrayQTy, Expr *ExprToVisit,
544 ArrayRef<Expr *> Args, Expr *ArrayFiller) {
545 uint64_t NumInitElements = Args.size();
546
547 uint64_t NumArrayElements = AType->getNumElements();
548 for (const auto *Init : Args) {
549 if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
550 NumInitElements += Embed->getDataElementCount() - 1;
551 if (NumInitElements > NumArrayElements) {
552 NumInitElements = NumArrayElements;
553 break;
554 }
555 }
556 }
557
558 assert(NumInitElements <= NumArrayElements);
559
560 QualType elementType =
561 CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
562 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
563 CharUnits elementAlign =
564 DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
565 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
566
567 // Consider initializing the array by copying from a global. For this to be
568 // more efficient than per-element initialization, the size of the elements
569 // with explicit initializers should be large enough.
570 if (NumInitElements * elementSize.getQuantity() > 16 &&
571 elementType.isTriviallyCopyableType(CGF.getContext())) {
572 CodeGen::CodeGenModule &CGM = CGF.CGM;
573 ConstantEmitter Emitter(CGF);
574 QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
575 CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
577 LangAS AS = GVArrayQTy.getAddressSpace();
578 if (llvm::Constant *C =
579 Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
580 auto GV = new llvm::GlobalVariable(
581 CGM.getModule(), C->getType(),
582 /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
583 "constinit",
584 /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
586 Emitter.finalize(GV);
587 CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
588 GV->setAlignment(Align.getAsAlign());
589 Address GVAddr(GV, GV->getValueType(), Align);
590 EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
591 return;
592 }
593 }
594
595 // Exception safety requires us to destroy all the
596 // already-constructed members if an initializer throws.
597 // For that, we'll need an EH cleanup.
598 QualType::DestructionKind dtorKind = elementType.isDestructedType();
599 Address endOfInit = Address::invalid();
600 CodeGenFunction::CleanupDeactivationScope deactivation(CGF);
601
602 llvm::Value *begin = DestPtr.emitRawPointer(CGF);
603 if (dtorKind) {
604 CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
605 // In principle we could tell the cleanup where we are more
606 // directly, but the control flow can get so varied here that it
607 // would actually be quite complex. Therefore we go through an
608 // alloca.
609 llvm::Instruction *dominatingIP =
610 Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
611 endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
612 "arrayinit.endOfInit");
613 Builder.CreateStore(begin, endOfInit);
614 CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
615 elementAlign,
616 CGF.getDestroyer(dtorKind));
618 .AddAuxAllocas(allocaTracker.Take());
619
621 {CGF.EHStack.stable_begin(), dominatingIP});
622 }
623
624 llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
625
626 auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
627 llvm::Value *element = begin;
628 if (ArrayIndex > 0) {
629 element = Builder.CreateInBoundsGEP(
630 llvmElementType, begin,
631 llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");
632
633 // Tell the cleanup that it needs to destroy up to this
634 // element. TODO: some of these stores can be trivially
635 // observed to be unnecessary.
636 if (endOfInit.isValid())
637 Builder.CreateStore(element, endOfInit);
638 }
639
640 LValue elementLV = CGF.MakeAddrLValue(
641 Address(element, llvmElementType, elementAlign), elementType);
642 EmitInitializationToLValue(Init, elementLV);
643 return true;
644 };
645
646 unsigned ArrayIndex = 0;
647 // Emit the explicit initializers.
648 for (uint64_t i = 0; i != NumInitElements; ++i) {
649 if (ArrayIndex >= NumInitElements)
650 break;
651 if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
652 EmbedS->doForEachDataElement(Emit, ArrayIndex);
653 } else {
654 Emit(Args[i], ArrayIndex);
655 ArrayIndex++;
656 }
657 }
658
659 // Check whether there's a non-trivial array-fill expression.
660 bool hasTrivialFiller = isTrivialFiller(ArrayFiller);
661
662 // Any remaining elements need to be zero-initialized, possibly
663 // using the filler expression. We can skip this if the we're
664 // emitting to zeroed memory.
665 if (NumInitElements != NumArrayElements &&
666 !(Dest.isZeroed() && hasTrivialFiller &&
667 CGF.getTypes().isZeroInitializable(elementType))) {
668
669 // Use an actual loop. This is basically
670 // do { *array++ = filler; } while (array != end);
671
672 // Advance to the start of the rest of the array.
673 llvm::Value *element = begin;
674 if (NumInitElements) {
675 element = Builder.CreateInBoundsGEP(
676 llvmElementType, element,
677 llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
678 "arrayinit.start");
679 if (endOfInit.isValid())
680 Builder.CreateStore(element, endOfInit);
681 }
682
683 // Compute the end of the array.
684 llvm::Value *end = Builder.CreateInBoundsGEP(
685 llvmElementType, begin,
686 llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");
687
688 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
689 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
690
691 // Jump into the body.
692 CGF.EmitBlock(bodyBB);
693 llvm::PHINode *currentElement =
694 Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
695 currentElement->addIncoming(element, entryBB);
696
697 // Emit the actual filler expression.
698 {
699 // C++1z [class.temporary]p5:
700 // when a default constructor is called to initialize an element of
701 // an array with no corresponding initializer [...] the destruction of
702 // every temporary created in a default argument is sequenced before
703 // the construction of the next array element, if any
704 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
705 LValue elementLV = CGF.MakeAddrLValue(
706 Address(currentElement, llvmElementType, elementAlign), elementType);
707 if (ArrayFiller)
708 EmitInitializationToLValue(ArrayFiller, elementLV);
709 else
710 EmitNullInitializationToLValue(elementLV);
711 }
712
713 // Move on to the next element.
714 llvm::Value *nextElement = Builder.CreateInBoundsGEP(
715 llvmElementType, currentElement, one, "arrayinit.next");
716
717 // Tell the EH cleanup that we finished with the last element.
718 if (endOfInit.isValid())
719 Builder.CreateStore(nextElement, endOfInit);
720
721 // Leave the loop if we're done.
722 llvm::Value *done =
723 Builder.CreateICmpEQ(nextElement, end, "arrayinit.done");
724 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
725 Builder.CreateCondBr(done, endBB, bodyBB);
726 currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
727
728 CGF.EmitBlock(endBB);
729 }
730}
731
732//===----------------------------------------------------------------------===//
733// Visitor Methods
734//===----------------------------------------------------------------------===//
735
736void AggExprEmitter::VisitMaterializeTemporaryExpr(
737 MaterializeTemporaryExpr *E) {
738 Visit(E->getSubExpr());
739}
740
741void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
742 // If this is a unique OVE, just visit its source expression.
743 if (e->isUnique())
744 Visit(e->getSourceExpr());
745 else
746 EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
747}
748
749void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
750 if (Dest.isPotentiallyAliased()) {
751 // Just emit a load of the lvalue + a copy, because our compound literal
752 // might alias the destination.
753 EmitAggLoadOfLValue(E);
754 return;
755 }
756
757 AggValueSlot Slot = EnsureSlot(E->getType());
758
759 // Block-scope compound literals are destroyed at the end of the enclosing
760 // scope in C.
761 bool Destruct =
762 !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
763 if (Destruct)
765
766 CGF.EmitAggExpr(E->getInitializer(), Slot);
767
768 if (Destruct)
771 CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
772 CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
773}
774
775/// Attempt to look through various unimportant expressions to find a
776/// cast of the given kind.
777static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
778 op = op->IgnoreParenNoopCasts(ctx);
779 if (auto castE = dyn_cast<CastExpr>(op)) {
780 if (castE->getCastKind() == kind)
781 return castE->getSubExpr();
782 }
783 return nullptr;
784}
785
786void AggExprEmitter::VisitCastExpr(CastExpr *E) {
787 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
788 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
789 switch (E->getCastKind()) {
790 case CK_Dynamic: {
791 // FIXME: Can this actually happen? We have no test coverage for it.
792 assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
793 LValue LV =
795 // FIXME: Do we also need to handle property references here?
796 if (LV.isSimple())
797 CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
798 else
799 CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
800
801 if (!Dest.isIgnored())
802 CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
803 break;
804 }
805
806 case CK_ToUnion: {
807 // Evaluate even if the destination is ignored.
808 if (Dest.isIgnored()) {
810 /*ignoreResult=*/true);
811 break;
812 }
813
814 // GCC union extension
815 QualType Ty = E->getSubExpr()->getType();
816 Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
817 EmitInitializationToLValue(E->getSubExpr(),
818 CGF.MakeAddrLValue(CastPtr, Ty));
819 break;
820 }
821
822 case CK_LValueToRValueBitCast: {
823 if (Dest.isIgnored()) {
825 /*ignoreResult=*/true);
826 break;
827 }
828
829 LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
830 Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
831 Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
832 llvm::Value *SizeVal = llvm::ConstantInt::get(
833 CGF.SizeTy,
835 Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
836 break;
837 }
838
839 case CK_DerivedToBase:
840 case CK_BaseToDerived:
841 case CK_UncheckedDerivedToBase: {
842 llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
843 "should have been unpacked before we got here");
844 }
845
846 case CK_NonAtomicToAtomic:
847 case CK_AtomicToNonAtomic: {
848 bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
849
850 // Determine the atomic and value types.
851 QualType atomicType = E->getSubExpr()->getType();
852 QualType valueType = E->getType();
853 if (isToAtomic)
854 std::swap(atomicType, valueType);
855
856 assert(atomicType->isAtomicType());
858 valueType, atomicType->castAs<AtomicType>()->getValueType()));
859
860 // Just recurse normally if we're ignoring the result or the
861 // atomic type doesn't change representation.
862 if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
863 return Visit(E->getSubExpr());
864 }
865
866 CastKind peepholeTarget =
867 (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
868
869 // These two cases are reverses of each other; try to peephole them.
870 if (Expr *op =
871 findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
872 assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
873 E->getType()) &&
874 "peephole significantly changed types?");
875 return Visit(op);
876 }
877
878 // If we're converting an r-value of non-atomic type to an r-value
879 // of atomic type, just emit directly into the relevant sub-object.
880 if (isToAtomic) {
881 AggValueSlot valueDest = Dest;
882 if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
883 // Zero-initialize. (Strictly speaking, we only need to initialize
884 // the padding at the end, but this is simpler.)
885 if (!Dest.isZeroed())
887
888 // Build a GEP to refer to the subobject.
889 Address valueAddr =
890 CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
891 valueDest = AggValueSlot::forAddr(
892 valueAddr, valueDest.getQualifiers(),
893 valueDest.isExternallyDestructed(), valueDest.requiresGCollection(),
896 }
897
898 CGF.EmitAggExpr(E->getSubExpr(), valueDest);
899 return;
900 }
901
902 // Otherwise, we're converting an atomic type to a non-atomic type.
903 // Make an atomic temporary, emit into that, and then copy the value out.
904 AggValueSlot atomicSlot =
905 CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
906 CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
907
908 Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
909 RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
910 return EmitFinalDestCopy(valueType, rvalue);
911 }
912 case CK_AddressSpaceConversion:
913 return Visit(E->getSubExpr());
914
915 case CK_LValueToRValue:
916 // If we're loading from a volatile type, force the destination
917 // into existence.
918 if (E->getSubExpr()->getType().isVolatileQualified()) {
919 bool Destruct =
920 !Dest.isExternallyDestructed() &&
922 if (Destruct)
924 EnsureDest(E->getType());
925 Visit(E->getSubExpr());
926
927 if (Destruct)
929 E->getType());
930
931 return;
932 }
933
934 [[fallthrough]];
935
936 case CK_HLSLArrayRValue:
937 Visit(E->getSubExpr());
938 break;
939 case CK_HLSLAggregateSplatCast: {
940 Expr *Src = E->getSubExpr();
941 QualType SrcTy = Src->getType();
942 RValue RV = CGF.EmitAnyExpr(Src);
943 LValue DestLVal = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
944 SourceLocation Loc = E->getExprLoc();
945
946 assert(RV.isScalar() && SrcTy->isScalarType() &&
947 "RHS of HLSL splat cast must be a scalar.");
948 llvm::Value *SrcVal = RV.getScalarVal();
949 EmitHLSLScalarElementwiseAndSplatCasts(CGF, DestLVal, SrcVal, SrcTy, Loc);
950 break;
951 }
952 case CK_HLSLElementwiseCast: {
953 Expr *Src = E->getSubExpr();
954 QualType SrcTy = Src->getType();
955 RValue RV = CGF.EmitAnyExpr(Src);
956 LValue DestLVal = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
957 SourceLocation Loc = E->getExprLoc();
958
959 if (RV.isScalar()) {
960 llvm::Value *SrcVal = RV.getScalarVal();
961 assert(SrcTy->isVectorType() &&
962 "HLSL Elementwise cast doesn't handle splatting.");
963 EmitHLSLScalarElementwiseAndSplatCasts(CGF, DestLVal, SrcVal, SrcTy, Loc);
964 } else {
965 assert(RV.isAggregate() &&
966 "Can't perform HLSL Aggregate cast on a complex type.");
967 Address SrcVal = RV.getAggregateAddress();
968 EmitHLSLElementwiseCast(CGF, DestLVal, CGF.MakeAddrLValue(SrcVal, SrcTy),
969 Loc);
970 }
971 break;
972 }
973 case CK_NoOp:
974 case CK_UserDefinedConversion:
975 case CK_ConstructorConversion:
977 E->getType()) &&
978 "Implicit cast types must be compatible");
979 Visit(E->getSubExpr());
980 break;
981
982 case CK_LValueBitCast:
983 llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
984
985 case CK_Dependent:
986 case CK_BitCast:
987 case CK_ArrayToPointerDecay:
988 case CK_FunctionToPointerDecay:
989 case CK_NullToPointer:
990 case CK_NullToMemberPointer:
991 case CK_BaseToDerivedMemberPointer:
992 case CK_DerivedToBaseMemberPointer:
993 case CK_MemberPointerToBoolean:
994 case CK_ReinterpretMemberPointer:
995 case CK_IntegralToPointer:
996 case CK_PointerToIntegral:
997 case CK_PointerToBoolean:
998 case CK_ToVoid:
999 case CK_VectorSplat:
1000 case CK_IntegralCast:
1001 case CK_BooleanToSignedIntegral:
1002 case CK_IntegralToBoolean:
1003 case CK_IntegralToFloating:
1004 case CK_FloatingToIntegral:
1005 case CK_FloatingToBoolean:
1006 case CK_FloatingCast:
1007 case CK_CPointerToObjCPointerCast:
1008 case CK_BlockPointerToObjCPointerCast:
1009 case CK_AnyPointerToBlockPointerCast:
1010 case CK_ObjCObjectLValueCast:
1011 case CK_FloatingRealToComplex:
1012 case CK_FloatingComplexToReal:
1013 case CK_FloatingComplexToBoolean:
1014 case CK_FloatingComplexCast:
1015 case CK_FloatingComplexToIntegralComplex:
1016 case CK_IntegralRealToComplex:
1017 case CK_IntegralComplexToReal:
1018 case CK_IntegralComplexToBoolean:
1019 case CK_IntegralComplexCast:
1020 case CK_IntegralComplexToFloatingComplex:
1021 case CK_ARCProduceObject:
1022 case CK_ARCConsumeObject:
1023 case CK_ARCReclaimReturnedObject:
1024 case CK_ARCExtendBlockObject:
1025 case CK_CopyAndAutoreleaseBlockObject:
1026 case CK_BuiltinFnToFnPtr:
1027 case CK_ZeroToOCLOpaqueType:
1028 case CK_MatrixCast:
1029 case CK_HLSLVectorTruncation:
1030 case CK_HLSLMatrixTruncation:
1031 case CK_IntToOCLSampler:
1032 case CK_FloatingToFixedPoint:
1033 case CK_FixedPointToFloating:
1034 case CK_FixedPointCast:
1035 case CK_FixedPointToBoolean:
1036 case CK_FixedPointToIntegral:
1037 case CK_IntegralToFixedPoint:
1038 llvm_unreachable("cast kind invalid for aggregate types");
1039 }
1040}
1041
1042void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
1043 if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
1044 EmitAggLoadOfLValue(E);
1045 return;
1046 }
1047
1048 withReturnValueSlot(
1049 E, [&](ReturnValueSlot Slot) { return CGF.EmitCallExpr(E, Slot); });
1050}
1051
1052void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
1053 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
1054 return CGF.EmitObjCMessageExpr(E, Slot);
1055 });
1056}
1057
1058void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
1059 CGF.EmitIgnoredExpr(E->getLHS());
1060 Visit(E->getRHS());
1061}
1062
1063void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
1064 CodeGenFunction::StmtExprEvaluation eval(CGF);
1065 CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
1066}
1067
1073
1074static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
1075 const BinaryOperator *E, llvm::Value *LHS,
1076 llvm::Value *RHS, CompareKind Kind,
1077 const char *NameSuffix = "") {
1078 QualType ArgTy = E->getLHS()->getType();
1079 if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
1080 ArgTy = CT->getElementType();
1081
1082 if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
1083 assert(Kind == CK_Equal &&
1084 "member pointers may only be compared for equality");
1086 CGF, LHS, RHS, MPT, /*IsInequality*/ false);
1087 }
1088
1089 // Compute the comparison instructions for the specified comparison kind.
1090 struct CmpInstInfo {
1091 const char *Name;
1092 llvm::CmpInst::Predicate FCmp;
1093 llvm::CmpInst::Predicate SCmp;
1094 llvm::CmpInst::Predicate UCmp;
1095 };
1096 CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
1097 using FI = llvm::FCmpInst;
1098 using II = llvm::ICmpInst;
1099 switch (Kind) {
1100 case CK_Less:
1101 return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
1102 case CK_Greater:
1103 return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
1104 case CK_Equal:
1105 return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
1106 }
1107 llvm_unreachable("Unrecognised CompareKind enum");
1108 }();
1109
1110 if (ArgTy->hasFloatingRepresentation())
1111 return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
1112 llvm::Twine(InstInfo.Name) + NameSuffix);
1113 if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
1114 auto Inst =
1115 ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
1116 return Builder.CreateICmp(Inst, LHS, RHS,
1117 llvm::Twine(InstInfo.Name) + NameSuffix);
1118 }
1119
1120 llvm_unreachable("unsupported aggregate binary expression should have "
1121 "already been handled");
1122}
1123
1124void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
1125 using llvm::BasicBlock;
1126 using llvm::PHINode;
1127 using llvm::Value;
1128 assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
1129 E->getRHS()->getType()));
1130 const ComparisonCategoryInfo &CmpInfo =
1132 assert(CmpInfo.Record->isTriviallyCopyable() &&
1133 "cannot copy non-trivially copyable aggregate");
1134
1135 QualType ArgTy = E->getLHS()->getType();
1136
1137 if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
1138 !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
1139 !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
1140 return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
1141 }
1142 bool IsComplex = ArgTy->isAnyComplexType();
1143
1144 // Evaluate the operands to the expression and extract their values.
1145 auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
1146 RValue RV = CGF.EmitAnyExpr(E);
1147 if (RV.isScalar())
1148 return {RV.getScalarVal(), nullptr};
1149 if (RV.isAggregate())
1150 return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
1151 assert(RV.isComplex());
1152 return RV.getComplexVal();
1153 };
1154 auto LHSValues = EmitOperand(E->getLHS()),
1155 RHSValues = EmitOperand(E->getRHS());
1156
1157 auto EmitCmp = [&](CompareKind K) {
1158 Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
1159 K, IsComplex ? ".r" : "");
1160 if (!IsComplex)
1161 return Cmp;
1162 assert(K == CompareKind::CK_Equal);
1163 Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
1164 RHSValues.second, K, ".i");
1165 return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
1166 };
1167 auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
1168 return Builder.getInt(VInfo->getIntValue());
1169 };
1170
1171 Value *Select;
1172 if (ArgTy->isNullPtrType()) {
1173 Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
1174 } else if (!CmpInfo.isPartial()) {
1175 Value *SelectOne =
1176 Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
1177 EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
1178 Select = Builder.CreateSelect(EmitCmp(CK_Equal),
1179 EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1180 SelectOne, "sel.eq");
1181 } else {
1182 Value *SelectEq = Builder.CreateSelect(
1183 EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1184 EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
1185 Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
1186 EmitCmpRes(CmpInfo.getGreater()),
1187 SelectEq, "sel.gt");
1188 Select = Builder.CreateSelect(
1189 EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
1190 }
1191 // Create the return value in the destination slot.
1192 EnsureDest(E->getType());
1193 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1194
1195 // Emit the address of the first (and only) field in the comparison category
1196 // type, and initialize it from the constant integer value selected above.
1197 LValue FieldLV = CGF.EmitLValueForFieldInitialization(
1198 DestLV, *CmpInfo.Record->field_begin());
1199 CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);
1200
1201 // All done! The result is in the Dest slot.
1202}
1203
1204void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
1205 if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
1206 VisitPointerToDataMemberBinaryOperator(E);
1207 else
1208 CGF.ErrorUnsupported(E, "aggregate binary expression");
1209}
1210
1211void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
1212 const BinaryOperator *E) {
1213 LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
1214 EmitFinalDestCopy(E->getType(), LV);
1215}
1216
1217/// Is the value of the given expression possibly a reference to or
1218/// into a __block variable?
1219static bool isBlockVarRef(const Expr *E) {
1220 // Make sure we look through parens.
1221 E = E->IgnoreParens();
1222
1223 // Check for a direct reference to a __block variable.
1224 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
1225 const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
1226 return (var && var->hasAttr<BlocksAttr>());
1227 }
1228
1229 // More complicated stuff.
1230
1231 // Binary operators.
1232 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
1233 // For an assignment or pointer-to-member operation, just care
1234 // about the LHS.
1235 if (op->isAssignmentOp() || op->isPtrMemOp())
1236 return isBlockVarRef(op->getLHS());
1237
1238 // For a comma, just care about the RHS.
1239 if (op->getOpcode() == BO_Comma)
1240 return isBlockVarRef(op->getRHS());
1241
1242 // FIXME: pointer arithmetic?
1243 return false;
1244
1245 // Check both sides of a conditional operator.
1246 } else if (const AbstractConditionalOperator *op =
1247 dyn_cast<AbstractConditionalOperator>(E)) {
1248 return isBlockVarRef(op->getTrueExpr()) ||
1249 isBlockVarRef(op->getFalseExpr());
1250
1251 // OVEs are required to support BinaryConditionalOperators.
1252 } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(E)) {
1253 if (const Expr *src = op->getSourceExpr())
1254 return isBlockVarRef(src);
1255
1256 // Casts are necessary to get things like (*(int*)&var) = foo().
1257 // We don't really care about the kind of cast here, except
1258 // we don't want to look through l2r casts, because it's okay
1259 // to get the *value* in a __block variable.
1260 } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
1261 if (cast->getCastKind() == CK_LValueToRValue)
1262 return false;
1263 return isBlockVarRef(cast->getSubExpr());
1264
1265 // Handle unary operators. Again, just aggressively look through
1266 // it, ignoring the operation.
1267 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
1268 return isBlockVarRef(uop->getSubExpr());
1269
1270 // Look into the base of a field access.
1271 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
1272 return isBlockVarRef(mem->getBase());
1273
1274 // Look into the base of a subscript.
1275 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
1276 return isBlockVarRef(sub->getBase());
1277 }
1278
1279 return false;
1280}
1281
1282void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1283 ApplyAtomGroup Grp(CGF.getDebugInfo());
1284 // For an assignment to work, the value on the right has
1285 // to be compatible with the value on the left.
1286 assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
1287 E->getRHS()->getType()) &&
1288 "Invalid assignment");
1289
1290 // If the LHS might be a __block variable, and the RHS can
1291 // potentially cause a block copy, we need to evaluate the RHS first
1292 // so that the assignment goes the right place.
1293 // This is pretty semantically fragile.
1294 if (isBlockVarRef(E->getLHS()) &&
1295 E->getRHS()->HasSideEffects(CGF.getContext())) {
1296 // Ensure that we have a destination, and evaluate the RHS into that.
1297 EnsureDest(E->getRHS()->getType());
1298 Visit(E->getRHS());
1299
1300 // Now emit the LHS and copy into it.
1301 LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
1302
1303 // That copy is an atomic copy if the LHS is atomic.
1304 if (LHS.getType()->isAtomicType() ||
1306 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1307 return;
1308 }
1309
1310 EmitCopy(E->getLHS()->getType(),
1312 needsGC(E->getLHS()->getType()),
1315 Dest);
1316 return;
1317 }
1318
1319 LValue LHS = CGF.EmitLValue(E->getLHS());
1320
1321 // If we have an atomic type, evaluate into the destination and then
1322 // do an atomic copy.
1323 if (LHS.getType()->isAtomicType() ||
1325 EnsureDest(E->getRHS()->getType());
1326 Visit(E->getRHS());
1327 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1328 return;
1329 }
1330
1331 // Codegen the RHS so that it stores directly into the LHS.
1332 AggValueSlot LHSSlot = AggValueSlot::forLValue(
1333 LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
1335 // A non-volatile aggregate destination might have volatile member.
1336 if (!LHSSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType()))
1337 LHSSlot.setVolatile(true);
1338
1339 CGF.EmitAggExpr(E->getRHS(), LHSSlot);
1340
1341 // Copy into the destination if the assignment isn't ignored.
1342 EmitFinalDestCopy(E->getType(), LHS);
1343
1344 if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
1347 E->getType());
1348}
1349
1350void AggExprEmitter::VisitAbstractConditionalOperator(
1351 const AbstractConditionalOperator *E) {
1352 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1353 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1354 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
1355
1356 // Bind the common expression if necessary.
1357 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
1358
1359 CodeGenFunction::ConditionalEvaluation eval(CGF);
1360 CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
1361 CGF.getProfileCount(E));
1362
1363 // Save whether the destination's lifetime is externally managed.
1364 bool isExternallyDestructed = Dest.isExternallyDestructed();
1365 bool destructNonTrivialCStruct =
1366 !isExternallyDestructed &&
1368 isExternallyDestructed |= destructNonTrivialCStruct;
1369 Dest.setExternallyDestructed(isExternallyDestructed);
1370
1371 eval.begin(CGF);
1372 CGF.EmitBlock(LHSBlock);
1374 Visit(E->getTrueExpr());
1375 eval.end(CGF);
1376
1377 assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
1378 CGF.Builder.CreateBr(ContBlock);
1379
1380 // If the result of an agg expression is unused, then the emission
1381 // of the LHS might need to create a destination slot. That's fine
1382 // with us, and we can safely emit the RHS into the same slot, but
1383 // we shouldn't claim that it's already being destructed.
1384 Dest.setExternallyDestructed(isExternallyDestructed);
1385
1386 eval.begin(CGF);
1387 CGF.EmitBlock(RHSBlock);
1389 Visit(E->getFalseExpr());
1390 eval.end(CGF);
1391
1392 if (destructNonTrivialCStruct)
1394 E->getType());
1395
1396 CGF.EmitBlock(ContBlock);
1397}
1398
1399void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
1400 Visit(CE->getChosenSubExpr());
1401}
1402
1403void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1404 Address ArgValue = Address::invalid();
1405 CGF.EmitVAArg(VE, ArgValue, Dest);
1406
1407 // If EmitVAArg fails, emit an error.
1408 if (!ArgValue.isValid()) {
1409 CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
1410 return;
1411 }
1412}
1413
1414void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1415 // Ensure that we have a slot, but if we already do, remember
1416 // whether it was externally destructed.
1417 bool wasExternallyDestructed = Dest.isExternallyDestructed();
1418 EnsureDest(E->getType());
1419
1420 // We're going to push a destructor if there isn't already one.
1422
1423 Visit(E->getSubExpr());
1424
1425 // Push that destructor we promised.
1426 if (!wasExternallyDestructed)
1427 CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1428}
1429
1430void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1431 AggValueSlot Slot = EnsureSlot(E->getType());
1432 CGF.EmitCXXConstructExpr(E, Slot);
1433}
1434
1435void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1436 const CXXInheritedCtorInitExpr *E) {
1437 AggValueSlot Slot = EnsureSlot(E->getType());
1439 Slot.getAddress(),
1440 E->inheritedFromVBase(), E);
1441}
1442
1443void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1444 AggValueSlot Slot = EnsureSlot(E->getType());
1445 LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1446
1447 // We'll need to enter cleanup scopes in case any of the element
1448 // initializers throws an exception or contains branch out of the expressions.
1449 CodeGenFunction::CleanupDeactivationScope scope(CGF);
1450
1451 CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1453 e = E->capture_init_end();
1454 i != e; ++i, ++CurField) {
1455 // Emit initialization
1456 LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1457 if (CurField->hasCapturedVLAType()) {
1458 CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1459 continue;
1460 }
1461
1462 EmitInitializationToLValue(*i, LV);
1463
1464 // Push a destructor if necessary.
1465 if (QualType::DestructionKind DtorKind =
1466 CurField->getType().isDestructedType()) {
1467 assert(LV.isSimple());
1468 if (DtorKind)
1470 CurField->getType(),
1471 CGF.getDestroyer(DtorKind), false);
1472 }
1473 }
1474}
1475
1476void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1477 CodeGenFunction::RunCleanupsScope cleanups(CGF);
1478 Visit(E->getSubExpr());
1479}
1480
1481void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1482 QualType T = E->getType();
1483 AggValueSlot Slot = EnsureSlot(T);
1484 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1485}
1486
1487void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1488 QualType T = E->getType();
1489 AggValueSlot Slot = EnsureSlot(T);
1490 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1491}
1492
/// Determine whether the given cast kind is known to always convert values
/// with all zero bits in their value representation to values with all zero
/// bits in their value representation.
///
/// Used below by isSimpleZero to look through casts when deciding whether an
/// initializer is just a store of zero. The switch must list every CastKind
/// enumerator; the llvm_unreachable after it catches newly added kinds.
static bool castPreservesZero(const CastExpr *CE) {
  switch (CE->getCastKind()) {
  // No-ops.
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
  case CK_BitCast:
  case CK_ToUnion:
  case CK_ToVoid:
  // Conversions between (possibly-complex) integral, (possibly-complex)
  // floating-point, and bool.
  case CK_BooleanToSignedIntegral:
  case CK_FloatingCast:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexToIntegralComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingRealToComplex:
  case CK_FloatingToBoolean:
  case CK_FloatingToIntegral:
  case CK_IntegralCast:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexToFloatingComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralRealToComplex:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  // Reinterpreting integers as pointers and vice versa.
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  // Language extensions.
  case CK_VectorSplat:
  case CK_MatrixCast:
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
  case CK_HLSLVectorTruncation:
  case CK_HLSLMatrixTruncation:
  case CK_HLSLElementwiseCast:
  case CK_HLSLAggregateSplatCast:
    return true;

  // Member-pointer conversions: whether a null member pointer is all-zero
  // depends on the C++ ABI, so answer conservatively.
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_NullToMemberPointer:
  case CK_ReinterpretMemberPointer:
    // FIXME: ABI-dependent.
    return false;

  case CK_AnyPointerToBlockPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_CPointerToObjCPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_IntToOCLSampler:
  case CK_ZeroToOCLOpaqueType:
    // FIXME: Check these.
    return false;

  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToFloating:
  case CK_FixedPointToIntegral:
  case CK_FloatingToFixedPoint:
  case CK_IntegralToFixedPoint:
    // FIXME: Do all fixed-point types represent zero as all 0 bits?
    return false;

  case CK_AddressSpaceConversion:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_Dynamic:
  case CK_NullToPointer:
  case CK_PointerToBoolean:
    // FIXME: Preserves zeroes only if zero pointers and null pointers have the
    // same representation in all involved address spaces.
    return false;

  // Kinds that produce values with no useful zero-preservation guarantee
  // (ARC object operations, decay to pointers, lvalue loads, etc.).
  case CK_ARCConsumeObject:
  case CK_ARCExtendBlockObject:
  case CK_ARCProduceObject:
  case CK_ARCReclaimReturnedObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_BuiltinFnToFnPtr:
  case CK_Dependent:
  case CK_LValueBitCast:
  case CK_LValueToRValue:
  case CK_LValueToRValueBitCast:
  case CK_UncheckedDerivedToBase:
  case CK_HLSLArrayRValue:
    return false;
  }
  llvm_unreachable("Unhandled clang::CastKind enum");
}
1592
1593/// isSimpleZero - If emitting this value will obviously just cause a store of
1594/// zero to memory, return true. This can return false if uncertain, so it just
1595/// handles simple cases.
1596static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1597 E = E->IgnoreParens();
1598 while (auto *CE = dyn_cast<CastExpr>(E)) {
1599 if (!castPreservesZero(CE))
1600 break;
1601 E = CE->getSubExpr()->IgnoreParens();
1602 }
1603
1604 // 0
1605 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1606 return IL->getValue() == 0;
1607 // +0.0
1608 if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1609 return FL->getValue().isPosZero();
1610 // int()
1613 return true;
1614 // (int*)0 - Null pointer expressions.
1615 if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1616 return ICE->getCastKind() == CK_NullToPointer &&
1618 !E->HasSideEffects(CGF.getContext());
1619 // '\0'
1620 if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1621 return CL->getValue() == 0;
1622
1623 // Otherwise, hard case: conservatively return false.
1624 return false;
1625}
1626
1627void AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1628 QualType type = LV.getType();
1629 // FIXME: Ignore result?
1630 // FIXME: Are initializers affected by volatile?
1631 if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1632 // Storing "i32 0" to a zero'd memory location is a noop.
1633 return;
1635 return EmitNullInitializationToLValue(LV);
1636 } else if (isa<NoInitExpr>(E)) {
1637 // Do nothing.
1638 return;
1639 } else if (type->isReferenceType()) {
1640 RValue RV = CGF.EmitReferenceBindingToExpr(E);
1641 return CGF.EmitStoreThroughLValue(RV, LV);
1642 }
1643
1644 CGF.EmitInitializationToLValue(E, LV, Dest.isZeroed());
1645}
1646
1647void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1648 QualType type = lv.getType();
1649
1650 // If the destination slot is already zeroed out before the aggregate is
1651 // copied into it, we don't have to emit any zeros here.
1652 if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1653 return;
1654
1655 if (CGF.hasScalarEvaluationKind(type)) {
1656 // For non-aggregates, we can store the appropriate null constant.
1657 llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1658 // Note that the following is not equivalent to
1659 // EmitStoreThroughBitfieldLValue for ARC types.
1660 if (lv.isBitField()) {
1662 } else {
1663 assert(lv.isSimple());
1664 CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1665 }
1666 } else {
1667 // There's a potential optimization opportunity in combining
1668 // memsets; that would be easy for arrays, but relatively
1669 // difficult for structures with the current code.
1670 CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
1671 }
1672}
1673
1674void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
1675 VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
1677 E->getArrayFiller());
1678}
1679
1680void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1681 if (E->hadArrayRangeDesignator())
1682 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1683
1684 if (E->isTransparent())
1685 return Visit(E->getInit(0));
1686
1687 VisitCXXParenListOrInitListExpr(
1688 E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
1689}
1690
1691void AggExprEmitter::VisitCXXParenListOrInitListExpr(
1692 Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
1693 FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
1694#if 0
1695 // FIXME: Assess perf here? Figure out what cases are worth optimizing here
1696 // (Length of globals? Chunks of zeroed-out space?).
1697 //
1698 // If we can, prefer a copy from a global; this is a lot less code for long
1699 // globals, and it's easier for the current optimizers to analyze.
1700 if (llvm::Constant *C =
1701 CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
1702 llvm::GlobalVariable* GV =
1703 new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1704 llvm::GlobalValue::InternalLinkage, C, "");
1705 EmitFinalDestCopy(ExprToVisit->getType(),
1706 CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
1707 return;
1708 }
1709#endif
1710
1711 // HLSL initialization lists in the AST are an expansion which can contain
1712 // side-effecting expressions wrapped in opaque value expressions. To properly
1713 // emit these we need to emit the opaque values before we emit the argument
1714 // expressions themselves. This is a little hacky, but it prevents us needing
1715 // to do a bigger AST-level change for a language feature that we need
1716 // deprecate in the near future. See related HLSL language proposals:
1717 // * 0005-strict-initializer-lists.md
1718 // * https://github.com/microsoft/hlsl-specs/pull/325
1719 if (CGF.getLangOpts().HLSL && isa<InitListExpr>(ExprToVisit))
1721 CGF, cast<InitListExpr>(ExprToVisit));
1722
1723 AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());
1724
1725 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
1726
1727 // Handle initialization of an array.
1728 if (ExprToVisit->getType()->isConstantArrayType()) {
1729 auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1730 EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
1731 InitExprs, ArrayFiller);
1732 return;
1733 } else if (ExprToVisit->getType()->isVariableArrayType()) {
1734 // A variable array type that has an initializer can only do empty
1735 // initialization. And because this feature is not exposed as an extension
1736 // in C++, we can safely memset the array memory to zero.
1737 assert(InitExprs.size() == 0 &&
1738 "you can only use an empty initializer with VLAs");
1739 CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
1740 return;
1741 }
1742
1743 assert(ExprToVisit->getType()->isRecordType() &&
1744 "Only support structs/unions here!");
1745
1746 // Do struct initialization; this code just sets each individual member
1747 // to the approprate value. This makes bitfield support automatic;
1748 // the disadvantage is that the generated code is more difficult for
1749 // the optimizer, especially with bitfields.
1750 unsigned NumInitElements = InitExprs.size();
1751 RecordDecl *record = ExprToVisit->getType()->castAsRecordDecl();
1752
1753 // We'll need to enter cleanup scopes in case any of the element
1754 // initializers throws an exception.
1755 CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);
1756
1757 unsigned curInitIndex = 0;
1758
1759 // Emit initialization of base classes.
1760 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1761 assert(NumInitElements >= CXXRD->getNumBases() &&
1762 "missing initializer for base class");
1763 for (auto &Base : CXXRD->bases()) {
1764 assert(!Base.isVirtual() && "should not see vbases here");
1765 auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1767 Dest.getAddress(), CXXRD, BaseRD,
1768 /*isBaseVirtual*/ false);
1769 AggValueSlot AggSlot = AggValueSlot::forAddr(
1770 V, Qualifiers(), AggValueSlot::IsDestructed,
1772 CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1773 CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
1774
1775 if (QualType::DestructionKind dtorKind =
1776 Base.getType().isDestructedType())
1777 CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
1778 }
1779 }
1780
1781 // Prepare a 'this' for CXXDefaultInitExprs.
1782 CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1783
1784 const bool ZeroInitPadding =
1785 CGF.CGM.shouldZeroInitPadding() && !Dest.isZeroed();
1786
1787 if (record->isUnion()) {
1788 // Only initialize one field of a union. The field itself is
1789 // specified by the initializer list.
1790 if (!InitializedFieldInUnion) {
1791 // Empty union; we have nothing to do.
1792
1793#ifndef NDEBUG
1794 // Make sure that it's really an empty and not a failure of
1795 // semantic analysis.
1796 for (const auto *Field : record->fields())
1797 assert(
1798 (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
1799 "Only unnamed bitfields or anonymous class allowed");
1800#endif
1801 return;
1802 }
1803
1804 // FIXME: volatility
1805 FieldDecl *Field = InitializedFieldInUnion;
1806
1807 LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1808 if (NumInitElements) {
1809 // Store the initializer into the field
1810 EmitInitializationToLValue(InitExprs[0], FieldLoc);
1811 if (ZeroInitPadding) {
1812 uint64_t TotalSize = CGF.getContext().toBits(
1813 Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
1814 uint64_t FieldSize = CGF.getContext().getTypeSize(FieldLoc.getType());
1815 DoZeroInitPadding(FieldSize, TotalSize, nullptr);
1816 }
1817 } else {
1818 // Default-initialize to null.
1819 if (ZeroInitPadding)
1820 EmitNullInitializationToLValue(DestLV);
1821 else
1822 EmitNullInitializationToLValue(FieldLoc);
1823 }
1824 return;
1825 }
1826
1827 // Here we iterate over the fields; this makes it simpler to both
1828 // default-initialize fields and skip over unnamed fields.
1829 const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(record);
1830 uint64_t PaddingStart = 0;
1831
1832 for (const auto *field : record->fields()) {
1833 // We're done once we hit the flexible array member.
1834 if (field->getType()->isIncompleteArrayType())
1835 break;
1836
1837 // Always skip anonymous bitfields.
1838 if (field->isUnnamedBitField())
1839 continue;
1840
1841 // We're done if we reach the end of the explicit initializers, we
1842 // have a zeroed object, and the rest of the fields are
1843 // zero-initializable.
1844 if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1845 CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
1846 break;
1847
1848 if (ZeroInitPadding)
1849 DoZeroInitPadding(PaddingStart,
1850 Layout.getFieldOffset(field->getFieldIndex()), field);
1851
1852 LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1853 // We never generate write-barries for initialized fields.
1854 LV.setNonGC(true);
1855
1856 if (curInitIndex < NumInitElements) {
1857 // Store the initializer into the field.
1858 EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
1859 } else {
1860 // We're out of initializers; default-initialize to null
1861 EmitNullInitializationToLValue(LV);
1862 }
1863
1864 // Push a destructor if necessary.
1865 // FIXME: if we have an array of structures, all explicitly
1866 // initialized, we can end up pushing a linear number of cleanups.
1867 if (QualType::DestructionKind dtorKind =
1868 field->getType().isDestructedType()) {
1869 assert(LV.isSimple());
1870 if (dtorKind) {
1872 field->getType(),
1873 CGF.getDestroyer(dtorKind), false);
1874 }
1875 }
1876 }
1877 if (ZeroInitPadding) {
1878 uint64_t TotalSize = CGF.getContext().toBits(
1879 Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
1880 DoZeroInitPadding(PaddingStart, TotalSize, nullptr);
1881 }
1882}
1883
/// Zero-initialize the padding bytes of the destination aggregate that lie in
/// the bit range [PaddingStart, PaddingEnd).
///
/// \param PaddingStart In/out cursor (in bits) marking how far the object has
///        been initialized so far; advanced past \p NextField on return so the
///        caller can keep walking the record's fields.
/// \param PaddingEnd End of the range to consider, in bits (typically the
///        offset of the next field, or the total object size for the tail).
/// \param NextField The field about to be initialized, or null when flushing
///        trailing padding after the last field.
void AggExprEmitter::DoZeroInitPadding(uint64_t &PaddingStart,
                                       uint64_t PaddingEnd,
                                       const FieldDecl *NextField) {

  // Emit a memset of zero over the destination byte range [StartBit, EndBit).
  // Bit positions are converted to char units; callers only pass byte-aligned
  // padding boundaries here.
  auto InitBytes = [&](uint64_t StartBit, uint64_t EndBit) {
    CharUnits Start = CGF.getContext().toCharUnitsFromBits(StartBit);
    CharUnits End = CGF.getContext().toCharUnitsFromBits(EndBit);
    Address Addr = Dest.getAddress().withElementType(CGF.CharTy);
    if (!Start.isZero())
      Addr = Builder.CreateConstGEP(Addr, Start.getQuantity());
    llvm::Constant *SizeVal = Builder.getInt64((End - Start).getQuantity());
    CGF.Builder.CreateMemSet(Addr, Builder.getInt8(0), SizeVal, false);
  };

  if (NextField != nullptr && NextField->isBitField()) {
    // For bitfield, zero init StorageSize before storing the bits. So we don't
    // need to handle big/little endian.
    const CGRecordLayout &RL =
        CGF.getTypes().getCGRecordLayout(NextField->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(NextField);
    uint64_t StorageStart = CGF.getContext().toBits(Info.StorageOffset);
    if (StorageStart + Info.StorageSize > PaddingStart) {
      // Zero any plain padding bytes preceding the bitfield storage unit.
      if (StorageStart > PaddingStart)
        InitBytes(PaddingStart, StorageStart);
      // Store a zero of the full storage-unit width, then advance the cursor
      // past the whole unit so it is not re-zeroed for a following bitfield
      // sharing the same storage.
      Address Addr = Dest.getAddress();
      if (!Info.StorageOffset.isZero())
        Addr = Builder.CreateConstGEP(Addr.withElementType(CGF.CharTy),
                                      Info.StorageOffset.getQuantity());
      Addr = Addr.withElementType(
          llvm::Type::getIntNTy(CGF.getLLVMContext(), Info.StorageSize));
      Builder.CreateStore(Builder.getIntN(Info.StorageSize, 0), Addr);
      PaddingStart = StorageStart + Info.StorageSize;
    }
    return;
  }

  // Non-bitfield (or tail) case: memset the padding range, then skip the
  // upcoming field's own bits — its initializer will store them.
  if (PaddingStart < PaddingEnd)
    InitBytes(PaddingStart, PaddingEnd);
  if (NextField != nullptr)
    PaddingStart =
        PaddingEnd + CGF.getContext().getTypeSize(NextField->getType());
}
1926
1927void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1928 llvm::Value *outerBegin) {
1929 // Emit the common subexpression.
1930 CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1931
1932 Address destPtr = EnsureSlot(E->getType()).getAddress();
1933 uint64_t numElements = E->getArraySize().getZExtValue();
1934
1935 if (!numElements)
1936 return;
1937
1938 // destPtr is an array*. Construct an elementType* by drilling down a level.
1939 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1940 llvm::Value *indices[] = {zero, zero};
1941 llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
1942 destPtr.emitRawPointer(CGF),
1943 indices, "arrayinit.begin");
1944
1945 // Prepare to special-case multidimensional array initialization: we avoid
1946 // emitting multiple destructor loops in that case.
1947 if (!outerBegin)
1948 outerBegin = begin;
1949 ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
1950
1951 QualType elementType =
1953 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1954 CharUnits elementAlign =
1955 destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1956 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
1957
1958 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1959 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1960
1961 // Jump into the body.
1962 CGF.EmitBlock(bodyBB);
1963 llvm::PHINode *index =
1964 Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1965 index->addIncoming(zero, entryBB);
1966 llvm::Value *element =
1967 Builder.CreateInBoundsGEP(llvmElementType, begin, index);
1968
1969 // Prepare for a cleanup.
1970 QualType::DestructionKind dtorKind = elementType.isDestructedType();
1971 EHScopeStack::stable_iterator cleanup;
1972 if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1973 if (outerBegin->getType() != element->getType())
1974 outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
1975 CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
1976 elementAlign,
1977 CGF.getDestroyer(dtorKind));
1979 } else {
1980 dtorKind = QualType::DK_none;
1981 }
1982
1983 // Emit the actual filler expression.
1984 {
1985 // Temporaries created in an array initialization loop are destroyed
1986 // at the end of each iteration.
1987 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
1988 CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
1989 LValue elementLV = CGF.MakeAddrLValue(
1990 Address(element, llvmElementType, elementAlign), elementType);
1991
1992 if (InnerLoop) {
1993 // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
1994 auto elementSlot = AggValueSlot::forLValue(
1995 elementLV, AggValueSlot::IsDestructed,
1998 AggExprEmitter(CGF, elementSlot, false)
1999 .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
2000 } else
2001 EmitInitializationToLValue(E->getSubExpr(), elementLV);
2002 }
2003
2004 // Move on to the next element.
2005 llvm::Value *nextIndex = Builder.CreateNUWAdd(
2006 index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
2007 index->addIncoming(nextIndex, Builder.GetInsertBlock());
2008
2009 // Leave the loop if we're done.
2010 llvm::Value *done = Builder.CreateICmpEQ(
2011 nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
2012 "arrayinit.done");
2013 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
2014 Builder.CreateCondBr(done, endBB, bodyBB);
2015
2016 CGF.EmitBlock(endBB);
2017
2018 // Leave the partial-array cleanup if we entered one.
2019 if (dtorKind)
2020 CGF.DeactivateCleanupBlock(cleanup, index);
2021}
2022
2023void AggExprEmitter::VisitDesignatedInitUpdateExpr(
2024 DesignatedInitUpdateExpr *E) {
2025 AggValueSlot Dest = EnsureSlot(E->getType());
2026
2027 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
2028 EmitInitializationToLValue(E->getBase(), DestLV);
2029 VisitInitListExpr(E->getUpdater());
2030}
2031
2032//===----------------------------------------------------------------------===//
2033// Entry Points into this File
2034//===----------------------------------------------------------------------===//
2035
2036/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
2037/// non-zero bytes that will be stored when outputting the initializer for the
2038/// specified initializer expression.
2040 if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
2041 E = MTE->getSubExpr();
2042 E = E->IgnoreParenNoopCasts(CGF.getContext());
2043
2044 // 0 and 0.0 won't require any non-zero stores!
2045 if (isSimpleZero(E, CGF))
2046 return CharUnits::Zero();
2047
2048 // If this is an initlist expr, sum up the size of sizes of the (present)
2049 // elements. If this is something weird, assume the whole thing is non-zero.
2050 const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
2051 while (ILE && ILE->isTransparent())
2052 ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
2053 if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
2054 return CGF.getContext().getTypeSizeInChars(E->getType());
2055
2056 // InitListExprs for structs have to be handled carefully. If there are
2057 // reference members, we need to consider the size of the reference, not the
2058 // referencee. InitListExprs for unions and arrays can't have references.
2059 if (const RecordType *RT = E->getType()->getAsCanonical<RecordType>()) {
2060 if (!RT->isUnionType()) {
2061 RecordDecl *SD = RT->getDecl()->getDefinitionOrSelf();
2062 CharUnits NumNonZeroBytes = CharUnits::Zero();
2063
2064 unsigned ILEElement = 0;
2065 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
2066 while (ILEElement != CXXRD->getNumBases())
2067 NumNonZeroBytes +=
2068 GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
2069 for (const auto *Field : SD->fields()) {
2070 // We're done once we hit the flexible array member or run out of
2071 // InitListExpr elements.
2072 if (Field->getType()->isIncompleteArrayType() ||
2073 ILEElement == ILE->getNumInits())
2074 break;
2075 if (Field->isUnnamedBitField())
2076 continue;
2077
2078 const Expr *E = ILE->getInit(ILEElement++);
2079
2080 // Reference values are always non-null and have the width of a pointer.
2081 if (Field->getType()->isReferenceType())
2082 NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
2084 else
2085 NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
2086 }
2087
2088 return NumNonZeroBytes;
2089 }
2090 }
2091
2092 // FIXME: This overestimates the number of non-zero bytes for bit-fields.
2093 CharUnits NumNonZeroBytes = CharUnits::Zero();
2094 for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
2095 NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
2096 return NumNonZeroBytes;
2097}
2098
2099/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
2100/// zeros in it, emit a memset and avoid storing the individual zeros.
2101///
2102static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
2103 CodeGenFunction &CGF) {
2104 // If the slot is already known to be zeroed, nothing to do. Don't mess with
2105 // volatile stores.
2106 if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
2107 return;
2108
2109 // C++ objects with a user-declared constructor don't need zero'ing.
2110 if (CGF.getLangOpts().CPlusPlus)
2111 if (const RecordType *RT = CGF.getContext()
2113 ->getAsCanonical<RecordType>()) {
2114 const auto *RD = cast<CXXRecordDecl>(RT->getDecl());
2115 if (RD->hasUserDeclaredConstructor())
2116 return;
2117 }
2118
2119 // If the type is 16-bytes or smaller, prefer individual stores over memset.
2120 CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
2121 if (Size <= CharUnits::fromQuantity(16))
2122 return;
2123
2124 // Check to see if over 3/4 of the initializer are known to be zero. If so,
2125 // we prefer to emit memset + individual stores for the rest.
2126 CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
2127 if (NumNonZeroBytes * 4 > Size)
2128 return;
2129
2130 // Okay, it seems like a good idea to use an initial memset, emit the call.
2131 llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
2132
2133 Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
2134 CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
2135
2136 // Tell the AggExprEmitter that the slot is known zero.
2137 Slot.setZeroed();
2138}
2139
2140/// EmitAggExpr - Emit the computation of the specified expression of aggregate
2141/// type. The result is computed into DestPtr. Note that if DestPtr is null,
2142/// the value of the aggregate expression is not needed. If VolatileDest is
2143/// true, DestPtr cannot be 0.
2145 assert(E && hasAggregateEvaluationKind(E->getType()) &&
2146 "Invalid aggregate expression to emit");
2147 assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
2148 "slot has bits but no address");
2149
2150 // Optimize the slot if possible.
2151 CheckAggExprForMemSetUse(Slot, E, *this);
2152
2153 AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
2154}
2155
2166
2168 const LValue &Src,
2169 ExprValueKind SrcKind) {
2170 return AggExprEmitter(*this, Dest, Dest.isIgnored())
2171 .EmitFinalDestCopy(Type, Src, SrcKind);
2172}
2173
2176 if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2178
2179 // Empty fields can overlap earlier fields.
2180 if (FD->getType()->getAsCXXRecordDecl()->isEmpty())
2182
2183 // If the field lies entirely within the enclosing class's nvsize, its tail
2184 // padding cannot overlap any already-initialized object. (The only subobjects
2185 // with greater addresses that might already be initialized are vbases.)
2186 const RecordDecl *ClassRD = FD->getParent();
2187 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
2188 if (Layout.getFieldOffset(FD->getFieldIndex()) +
2189 getContext().getTypeSize(FD->getType()) <=
2190 (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
2192
2193 // The tail padding may contain values we need to preserve.
2195}
2196
2198 const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
2199 // If the most-derived object is a field declared with [[no_unique_address]],
2200 // the tail padding of any virtual base could be reused for other subobjects
2201 // of that field's class.
2202 if (IsVirtual)
2204
2205 // Empty bases can overlap earlier bases.
2206 if (BaseRD->isEmpty())
2208
2209 // If the base class is laid out entirely within the nvsize of the derived
2210 // class, its tail padding cannot yet be initialized, so we can issue
2211 // stores at the full width of the base class.
2212 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2213 if (Layout.getBaseClassOffset(BaseRD) +
2214 getContext().getASTRecordLayout(BaseRD).getSize() <=
2215 Layout.getNonVirtualSize())
2217
2218 // The tail padding may contain values we need to preserve.
2220}
2221
2223 AggValueSlot::Overlap_t MayOverlap,
2224 bool isVolatile) {
2225 assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
2226
2227 Address DestPtr = Dest.getAddress();
2228 Address SrcPtr = Src.getAddress();
2229
2230 if (getLangOpts().CPlusPlus) {
2231 if (const auto *Record = Ty->getAsCXXRecordDecl()) {
2232 assert((Record->hasTrivialCopyConstructor() ||
2233 Record->hasTrivialCopyAssignment() ||
2234 Record->hasTrivialMoveConstructor() ||
2235 Record->hasTrivialMoveAssignment() ||
2236 Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
2237 "Trying to aggregate-copy a type without a trivial copy/move "
2238 "constructor or assignment operator");
2239 // Ignore empty classes in C++.
2240 if (Record->isEmpty())
2241 return;
2242 }
2243 }
2244
2245 if (getLangOpts().CUDAIsDevice) {
2247 if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
2248 Src))
2249 return;
2250 } else if (Ty->isCUDADeviceBuiltinTextureType()) {
2251 if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
2252 Src))
2253 return;
2254 }
2255 }
2256
2258 if (CGM.getHLSLRuntime().emitBufferCopy(*this, DestPtr, SrcPtr, Ty))
2259 return;
2260
2261 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
2262 // C99 6.5.16.1p3, which states "If the value being stored in an object is
2263 // read from another object that overlaps in anyway the storage of the first
2264 // object, then the overlap shall be exact and the two objects shall have
2265 // qualified or unqualified versions of a compatible type."
2266 //
2267 // memcpy is not defined if the source and destination pointers are exactly
2268 // equal, but other compilers do this optimization, and almost every memcpy
2269 // implementation handles this case safely. If there is a libc that does not
2270 // safely handle this, we can add a target hook.
2271
2272 // Get data size info for this aggregate. Don't copy the tail padding if this
2273 // might be a potentially-overlapping subobject, since the tail padding might
2274 // be occupied by a different object. Otherwise, copying it is fine.
2276 if (MayOverlap)
2277 TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
2278 else
2279 TypeInfo = getContext().getTypeInfoInChars(Ty);
2280
2281 llvm::Value *SizeVal = nullptr;
2282 if (TypeInfo.Width.isZero()) {
2283 // But note that getTypeInfo returns 0 for a VLA.
2284 if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
2285 getContext().getAsArrayType(Ty))) {
2286 QualType BaseEltTy;
2287 SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
2288 TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
2289 assert(!TypeInfo.Width.isZero());
2290 SizeVal = Builder.CreateNUWMul(
2291 SizeVal,
2292 llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
2293 }
2294 }
2295 if (!SizeVal) {
2296 SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
2297 }
2298
2299 // FIXME: If we have a volatile struct, the optimizer can remove what might
2300 // appear to be `extra' memory ops:
2301 //
2302 // volatile struct { int i; } a, b;
2303 //
2304 // int main() {
2305 // a = b;
2306 // a = b;
2307 // }
2308 //
2309 // we need to use a different call here. We use isVolatile to indicate when
2310 // either the source or the destination is volatile.
2311
2312 DestPtr = DestPtr.withElementType(Int8Ty);
2313 SrcPtr = SrcPtr.withElementType(Int8Ty);
2314
2315 // Don't do any of the memmove_collectable tests if GC isn't set.
2316 if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
2317 // fall through
2318 } else if (const auto *Record = Ty->getAsRecordDecl()) {
2319 if (Record->hasObjectMember()) {
2320 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2321 SizeVal);
2322 return;
2323 }
2324 } else if (Ty->isArrayType()) {
2325 QualType BaseType = getContext().getBaseElementType(Ty);
2326 if (const auto *Record = BaseType->getAsRecordDecl()) {
2327 if (Record->hasObjectMember()) {
2328 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2329 SizeVal);
2330 return;
2331 }
2332 }
2333 }
2334
2335 auto *Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2336 addInstToCurrentSourceAtom(Inst, nullptr);
2337
2338 // Determine the metadata to describe the position of any padding in this
2339 // memcpy, as well as the TBAA tags for the members of the struct, in case
2340 // the optimizer wishes to expand it in to scalar memory operations.
2341 if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2342 Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2343
2344 if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2345 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
2346 Dest.getTBAAInfo(), Src.getTBAAInfo());
2347 CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2348 }
2349}
Defines the clang::ASTContext interface.
#define V(N, I)
CompareKind
@ CK_Greater
@ CK_Less
@ CK_Equal
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF)
GetNumNonZeroBytesInInit - Get an approximate count of the number of non-zero bytes that will be stor...
static Expr * findPeephole(Expr *op, CastKind kind, const ASTContext &ctx)
Attempt to look through various unimportant expressions to find a cast of the given kind.
static bool isBlockVarRef(const Expr *E)
Is the value of the given expression possibly a reference to or into a __block variable?
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF)
isSimpleZero - If emitting this value will obviously just cause a store of zero to memory,...
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal, LValue SrcVal, SourceLocation Loc)
static bool castPreservesZero(const CastExpr *CE)
Determine whether the given cast kind is known to always convert values with all zero bits in their v...
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, CodeGenFunction &CGF)
CheckAggExprForMemSetUse - If the initializer is large and has a lot of zeros in it,...
static void EmitHLSLScalarElementwiseAndSplatCasts(CodeGenFunction &CGF, LValue DestVal, llvm::Value *SrcVal, QualType SrcTy, SourceLocation Loc)
static bool isTrivialFiller(Expr *e)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the C++ template declaration subclasses.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
llvm::MachO::Record Record
Definition MachO.h:31
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
static bool isVector(QualType QT, QualType ElementType)
This helper function returns true if QT is a vector type that has element type ElementType.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
ComparisonCategories CompCategories
Types and expressions required to build C++2a three-way comparisons using operator<=>,...
QualType removeAddrSpaceQualType(QualType T) const
Remove any existing address space on the type and returns the type with qualifiers intact (or that's ...
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
static bool hasSameType(QualType T1, QualType T2)
Determine whether the given types T1 and T2 are equivalent.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4353
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4531
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4537
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4543
llvm::APInt getArraySize() const
Definition Expr.h:5990
OpaqueValueExpr * getCommonExpr() const
Get the common subexpression shared by all initializations (the source array).
Definition Expr.h:5983
Expr * getSubExpr() const
Get the initializer to use for each array element.
Definition Expr.h:5988
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2721
QualType getElementType() const
Definition TypeBase.h:3735
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4038
Expr * getLHS() const
Definition Expr.h:4088
Expr * getRHS() const
Definition Expr.h:4090
Opcode getOpcode() const
Definition Expr.h:4083
CXXTemporary * getTemporary()
Definition ExprCXX.h:1511
const Expr * getSubExpr() const
Definition ExprCXX.h:1515
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool constructsVBase() const
Determine whether this constructor is actually constructing a base class (rather than a complete obje...
Definition ExprCXX.h:1792
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will call.
Definition ExprCXX.h:1788
bool inheritedFromVBase() const
Determine whether the inherited constructor is inherited from a virtual base of the object we constru...
Definition ExprCXX.h:1802
MutableArrayRef< Expr * > getInitExprs()
Definition ExprCXX.h:5181
FieldDecl * getInitializedFieldInUnion()
Definition ExprCXX.h:5219
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool isTriviallyCopyable() const
Determine whether this class is considered trivially copyable per (C++11 [class]p6).
Definition DeclCXX.cpp:607
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:304
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1602
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3676
CastKind getCastKind() const
Definition Expr.h:3720
Expr * getSubExpr()
Definition Expr.h:3726
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4884
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * getBasePointer() const
Definition Address.h:198
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
bool isValid() const
Definition Address.h:177
An aggregate value slot.
Definition CGValue.h:551
void setVolatile(bool flag)
Definition CGValue.h:670
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition CGValue.h:619
Address getAddress() const
Definition CGValue.h:691
CharUnits getPreferredSize(ASTContext &Ctx, QualType Type) const
Get the preferred size to use when storing a value to this slot.
Definition CGValue.h:729
NeedsGCBarriers_t requiresGCollection() const
Definition CGValue.h:681
void setExternallyDestructed(bool destructed=true)
Definition CGValue.h:660
void setZeroed(bool V=true)
Definition CGValue.h:721
IsZeroed_t isZeroed() const
Definition CGValue.h:722
Qualifiers getQualifiers() const
Definition CGValue.h:664
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition CGValue.h:649
IsAliased_t isPotentiallyAliased() const
Definition CGValue.h:701
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:634
IsDestructed_t isExternallyDestructed() const
Definition CGValue.h:657
Overlap_t mayOverlap() const
Definition CGValue.h:705
RValue asRValue() const
Definition CGValue.h:713
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Definition CGValue.h:687
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:402
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition CGBuilder.h:223
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:84
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr, llvm::Value *Size)=0
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to the destination address Dst from the value Src, coercing as needed.
Definition CGCall.cpp:1400
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:573
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
void callCStructMoveConstructor(LValue Dst, LValue Src)
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null. If the type contains...
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
pushRegularPartialArrayCleanup - Push an EH cleanup to destroy already-constructed elements of the gi...
Definition CGDecl.cpp:2609
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:2905
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6345
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7101
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual)
Determine whether a base class initialization may overlap some other object.
const LangOptions & getLangOpts() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:688
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
Definition CGExpr.cpp:6931
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
Definition CGDecl.cpp:2293
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to destroy already-constructed elements ...
Definition CGDecl.cpp:2593
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
Definition CGDecl.cpp:2266
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
Definition CGExpr.cpp:7106
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetInfo & getTarget() const
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:245
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6248
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2407
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
Definition CGDecl.cpp:2318
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
void callCStructCopyAssignmentOperator(LValue Dst, LValue Src)
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
void callCStructCopyConstructor(LValue Dst, LValue Src)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5738
Address GetAddressOfDirectBaseInCompleteClass(Address Value, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual)
GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a complete class to the given direct b...
Definition CGClass.cpp:214
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition CGExpr.cpp:154
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6187
const TargetCodeGenInfo & getTargetHooks() const
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1368
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTempWithoutCast - Create a temporary memory object of the given type, with appropriate alignment, without...
Definition CGExpr.cpp:218
void callCStructMoveAssignmentOperator(LValue Dst, LValue Src)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2643
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2346
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:573
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:267
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:901
CodeGenTypes & getTypes() const
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7110
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
Emits all the code to cause the given temporary to be cleaned up.
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1657
void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E)
Emit a call to a constructor inherited from a base class, passing the current constructor's arguments...
Definition CGClass.cpp:2413
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:189
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Definition CGExpr.cpp:326
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1692
llvm::LLVMContext & getLLVMContext()
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:656
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1372
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::Module & getModule() const
bool isPaddedAtomicType(QualType type)
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
ASTContext & getContext() const
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
bool isPointerZeroInitializable(QualType T)
Check if the pointer type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM z...
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into a unstable pointer to the EH stack.
Definition CGCleanup.h:647
LValue - This represents an lvalue reference.
Definition CGValue.h:183
Address getAddress() const
Definition CGValue.h:373
TBAAAccessInfo getTBAAInfo() const
Definition CGValue.h:347
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
llvm::Value * getAggregatePointer(QualType PointeeType, CodeGenFunction &CGF) const
Definition CGValue.h:89
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddress() - Return the Address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
bool isComplex() const
Definition CGValue.h:65
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition CGValue.h:79
const ComparisonCategoryInfo & getInfoForType(QualType Ty) const
Return the comparison category information as specified by getCategoryForType(Ty).
bool isPartial() const
True iff the comparison is not totally ordered.
const ValueInfo * getLess() const
const ValueInfo * getUnordered() const
const CXXRecordDecl * Record
The declaration for the comparison category type from the standard library.
const ValueInfo * getGreater() const
const ValueInfo * getEqualOrEquiv() const
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3276
const Expr * getInitializer() const
Definition Expr.h:3633
llvm::APInt getSize() const
Return the constant array size as an APInt.
Definition TypeBase.h:3817
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
bool hasAttr() const
Definition DeclBase.h:577
InitListExpr * getUpdater() const
Definition Expr.h:5936
This represents one expression.
Definition Expr.h:112
bool isGLValue() const
Definition Expr.h:287
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3116
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3085
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3669
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3263
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
const Expr * getSubExpr() const
Definition Expr.h:1062
Describes an C or C++ initializer list.
Definition Expr.h:5299
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2461
FieldDecl * getInitializedFieldInUnion()
If this initializes a union, specifies which field in the union to initialize.
Definition Expr.h:5425
unsigned getNumInits() const
Definition Expr.h:5329
bool hadArrayRangeDesignator() const
Definition Expr.h:5483
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5401
const Expr * getInit(unsigned Init) const
Definition Expr.h:5353
ArrayRef< Expr * > inits()
Definition Expr.h:5349
capture_init_iterator capture_init_end()
Retrieve the iterator pointing one past the last initialization argument for this lambda expression.
Definition ExprCXX.h:2106
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition ExprCXX.h:2080
capture_init_iterator capture_init_begin()
Retrieve the first initialization argument for this lambda expression (which initializes the first ca...
Definition ExprCXX.h:2094
CXXRecordDecl * getLambdaClass() const
Retrieve the class that corresponds to the lambda.
Definition ExprCXX.cpp:1400
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3364
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3654
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1228
bool isUnique() const
Definition Expr.h:1236
Expr * getSelectedExpr() const
Definition ExprCXX.h:4639
const Expr * getSubExpr() const
Definition Expr.h:2199
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8376
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2867
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8418
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
@ PCK_Struct
The type is a struct containing a field whose type is neither PCK_Trivial nor PCK_VolatileTrivial.
Definition TypeBase.h:1517
Represents a struct/union/class.
Definition Decl.h:4324
bool hasObjectMember() const
Definition Decl.h:4384
field_range fields() const
Definition Decl.h:4527
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4524
RecordDecl * getDefinitionOrSelf() const
Definition Decl.h:4512
field_iterator field_begin() const
Definition Decl.cpp:5270
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4612
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
bool isUnion() const
Definition Decl.h:3925
uint64_t getPointerWidth(LangAS AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition TargetInfo.h:489
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8632
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isArrayType() const
Definition TypeBase.h:8628
bool isPointerType() const
Definition TypeBase.h:8529
bool isReferenceType() const
Definition TypeBase.h:8553
bool isScalarType() const
Definition TypeBase.h:8993
bool isVariableArrayType() const
Definition TypeBase.h:8640
bool isCUDADeviceBuiltinSurfaceType() const
Check if the type is the CUDA device builtin surface type.
Definition Type.cpp:5327
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition TypeBase.h:9009
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8664
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2244
bool isMemberPointerType() const
Definition TypeBase.h:8610
bool isCUDADeviceBuiltinTextureType() const
Check if the type is the CUDA device builtin texture type.
Definition Type.cpp:5336
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
Definition Type.cpp:2313
bool isVectorType() const
Definition TypeBase.h:8668
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2321
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
Definition TypeBase.h:2922
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9111
bool isNullPtrType() const
Definition TypeBase.h:8928
bool isRecordType() const
Definition TypeBase.h:8656
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
Expr * getSubExpr() const
Definition Expr.h:2285
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
Represents a GCC generic vector type.
Definition TypeBase.h:4176
Definition SPIR.cpp:35
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< AtomicType > atomicType
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1325
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
LangAS
Defines the address space values used by the address space qualifier of QualType.
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * CharTy
char