clang 23.0.0git
CGExprAgg.cpp
Go to the documentation of this file.
1//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGDebugInfo.h"
15#include "CGHLSLRuntime.h"
16#include "CGObjCRuntime.h"
17#include "CGRecordLayout.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "EHScopeStack.h"
22#include "TargetInfo.h"
24#include "clang/AST/Attr.h"
25#include "clang/AST/DeclCXX.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/GlobalVariable.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/IntrinsicInst.h"
33#include "llvm/IR/Intrinsics.h"
34using namespace clang;
35using namespace CodeGen;
36
37//===----------------------------------------------------------------------===//
38// Aggregate Expression Emitter
39//===----------------------------------------------------------------------===//
40
41namespace {
42class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
43 CodeGenFunction &CGF;
44 CGBuilderTy &Builder;
45 AggValueSlot Dest;
46 bool IsResultUnused;
47
48 AggValueSlot EnsureSlot(QualType T) {
49 if (!Dest.isIgnored())
50 return Dest;
51 return CGF.CreateAggTemp(T, "agg.tmp.ensured");
52 }
53 void EnsureDest(QualType T) {
54 if (!Dest.isIgnored())
55 return;
56 Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
57 }
58
59 // Calls `Fn` with a valid return value slot, potentially creating a temporary
60 // to do so. If a temporary is created, an appropriate copy into `Dest` will
61 // be emitted, as will lifetime markers.
62 //
63 // The given function should take a ReturnValueSlot, and return an RValue that
64 // points to said slot.
65 void withReturnValueSlot(const Expr *E,
66 llvm::function_ref<RValue(ReturnValueSlot)> Fn);
67
68 void DoZeroInitPadding(uint64_t &PaddingStart, uint64_t PaddingEnd,
69 const FieldDecl *NextField);
70
71public:
72 AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
73 : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
74 IsResultUnused(IsResultUnused) {}
75
76 //===--------------------------------------------------------------------===//
77 // Utilities
78 //===--------------------------------------------------------------------===//
79
80 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
81 /// represents a value lvalue, this method emits the address of the lvalue,
82 /// then loads the result into DestPtr.
83 void EmitAggLoadOfLValue(const Expr *E);
84
85 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
86 /// SrcIsRValue is true if source comes from an RValue.
87 void EmitFinalDestCopy(QualType type, const LValue &src,
90 void EmitFinalDestCopy(QualType type, RValue src);
91 void EmitCopy(QualType type, const AggValueSlot &dest,
92 const AggValueSlot &src);
93
94 void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
95 Expr *ExprToVisit, ArrayRef<Expr *> Args,
96 Expr *ArrayFiller);
97
98 AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
99 if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
102 }
103
104 bool TypeRequiresGCollection(QualType T);
105
106 //===--------------------------------------------------------------------===//
107 // Visitor Methods
108 //===--------------------------------------------------------------------===//
109
110 void Visit(Expr *E) {
111 ApplyDebugLocation DL(CGF, E);
112 StmtVisitor<AggExprEmitter>::Visit(E);
113 }
114
115 void VisitStmt(Stmt *S) { CGF.ErrorUnsupported(S, "aggregate expression"); }
116 void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
117 void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
118 Visit(GE->getResultExpr());
119 }
120 void VisitCoawaitExpr(CoawaitExpr *E) {
121 CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
122 }
123 void VisitCoyieldExpr(CoyieldExpr *E) {
124 CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
125 }
126 void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
127 void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
128 void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
129 return Visit(E->getReplacement());
130 }
131
132 void VisitConstantExpr(ConstantExpr *E) {
133 EnsureDest(E->getType());
134
135 if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
137 Result, E->getType(), Dest.getAddress(),
138 llvm::TypeSize::getFixed(
139 Dest.getPreferredSize(CGF.getContext(), E->getType())
140 .getQuantity()),
142 return;
143 }
144 return Visit(E->getSubExpr());
145 }
146
147 // l-values.
148 void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
149 void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
150 void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
151 void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
152 void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
153 void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
154 EmitAggLoadOfLValue(E);
155 }
156 void VisitPredefinedExpr(const PredefinedExpr *E) { EmitAggLoadOfLValue(E); }
157
158 // Operators.
159 void VisitCastExpr(CastExpr *E);
160 void VisitCallExpr(const CallExpr *E);
161 void VisitStmtExpr(const StmtExpr *E);
162 void VisitBinaryOperator(const BinaryOperator *BO);
163 void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
164 void VisitBinAssign(const BinaryOperator *E);
165 void VisitBinComma(const BinaryOperator *E);
166 void VisitBinCmp(const BinaryOperator *E);
167 void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
168 Visit(E->getSemanticForm());
169 }
170
171 void VisitObjCMessageExpr(ObjCMessageExpr *E);
172 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { EmitAggLoadOfLValue(E); }
173
174 void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
175 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
176 void VisitChooseExpr(const ChooseExpr *CE);
177 void VisitInitListExpr(InitListExpr *E);
178 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
179 FieldDecl *InitializedFieldInUnion,
180 Expr *ArrayFiller);
181 void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
182 llvm::Value *outerBegin = nullptr);
183 void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
184 void VisitNoInitExpr(NoInitExpr *E) {} // Do nothing.
185 void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
186 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
187 Visit(DAE->getExpr());
188 }
189 void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
190 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
191 Visit(DIE->getExpr());
192 }
193 void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
194 void VisitCXXConstructExpr(const CXXConstructExpr *E);
195 void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
196 void VisitLambdaExpr(LambdaExpr *E);
197 void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
198 void VisitExprWithCleanups(ExprWithCleanups *E);
199 void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
200 void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
201 void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
202 void VisitOpaqueValueExpr(OpaqueValueExpr *E);
203
204 void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
205 if (E->isGLValue()) {
206 LValue LV = CGF.EmitPseudoObjectLValue(E);
207 return EmitFinalDestCopy(E->getType(), LV);
208 }
209
210 AggValueSlot Slot = EnsureSlot(E->getType());
211 bool NeedsDestruction =
212 !Slot.isExternallyDestructed() &&
214 if (NeedsDestruction)
216 CGF.EmitPseudoObjectRValue(E, Slot);
217 if (NeedsDestruction)
219 E->getType());
220 }
221
222 void VisitVAArgExpr(VAArgExpr *E);
223 void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
224 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
225 Expr *ArrayFiller);
226
227 void EmitInitializationToLValue(Expr *E, LValue Address);
228 void EmitNullInitializationToLValue(LValue Address);
229 // case Expr::ChooseExprClass:
230 void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
231 void VisitAtomicExpr(AtomicExpr *E) {
232 RValue Res = CGF.EmitAtomicExpr(E);
233 EmitFinalDestCopy(E->getType(), Res);
234 }
235 void VisitPackIndexingExpr(PackIndexingExpr *E) {
236 Visit(E->getSelectedExpr());
237 }
238};
239} // end anonymous namespace.
240
241//===----------------------------------------------------------------------===//
242// Utilities
243//===----------------------------------------------------------------------===//
244
245/// EmitAggLoadOfLValue - Given an expression with aggregate type that
246/// represents a value lvalue, this method emits the address of the lvalue,
247/// then loads the result into DestPtr.
248void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
249 LValue LV = CGF.EmitLValue(E);
250
251 // If the type of the l-value is atomic, then do an atomic load.
252 if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
253 CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
254 return;
255 }
256
257 EmitFinalDestCopy(E->getType(), LV);
258}
259
260/// True if the given aggregate type requires special GC API calls.
261bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
262 // Only record types have members that might require garbage collection.
263 const auto *Record = T->getAsRecordDecl();
264 if (!Record)
265 return false;
266
267 // Don't mess with non-trivial C++ types.
269 (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
270 !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
271 return false;
272
273 // Check whether the type has an object member.
274 return Record->hasObjectMember();
275}
276
277void AggExprEmitter::withReturnValueSlot(
278 const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
279 QualType RetTy = E->getType();
280 bool RequiresDestruction =
281 !Dest.isExternallyDestructed() &&
283
284 // If it makes no observable difference, save a memcpy + temporary.
285 //
286 // We need to always provide our own temporary if destruction is required.
287 // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
288 // its lifetime before we have the chance to emit a proper destructor call.
289 //
290 // We also need a temporary if the destination is in a different address space
291 // from the sret AS. Use the target hook to get the actual sret AS for this
292 // return type.
293 const CXXRecordDecl *RD = RetTy->getAsCXXRecordDecl();
294 LangAS SRetLangAS = CGF.CGM.getTargetCodeGenInfo().getSRetAddrSpace(RD);
295 unsigned SRetAS = CGF.getContext().getTargetAddressSpace(SRetLangAS);
296 bool CanAggregateCopy =
297 RD ? (RD->hasTrivialCopyConstructor() ||
299 RD->hasTrivialMoveAssignment() || RD->hasAttr<TrivialABIAttr>() ||
300 RD->isUnion())
301 : RetTy.isTriviallyCopyableType(CGF.getContext());
302 bool DestASMismatch = !Dest.isIgnored() && CanAggregateCopy &&
303 Dest.getAddress()
305 ->stripPointerCasts()
306 ->getType()
307 ->getPointerAddressSpace() != SRetAS;
308 bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
309 (RequiresDestruction && Dest.isIgnored()) || DestASMismatch;
310
311 Address RetAddr = Address::invalid();
312
313 EHScopeStack::stable_iterator LifetimeEndBlock;
314 llvm::IntrinsicInst *LifetimeStartInst = nullptr;
315 if (!UseTemp) {
316 RetAddr = Dest.getAddress();
317 if (RetAddr.isValid() && RetAddr.getAddressSpace() != SRetAS) {
318 llvm::Type *SRetPtrTy =
319 llvm::PointerType::get(CGF.getLLVMContext(), SRetAS);
320 RetAddr = RetAddr.withPointer(
321 CGF.performAddrSpaceCast(RetAddr.getBasePointer(), SRetPtrTy),
322 RetAddr.isKnownNonNull());
323 }
324 } else {
325 RetAddr = CGF.CreateMemTempWithoutCast(RetTy, "tmp");
326 if (CGF.EmitLifetimeStart(RetAddr.getBasePointer())) {
327 LifetimeStartInst =
328 cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
329 assert(LifetimeStartInst->getIntrinsicID() ==
330 llvm::Intrinsic::lifetime_start &&
331 "Last insertion wasn't a lifetime.start?");
332
333 CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
334 NormalEHLifetimeMarker, RetAddr);
335 LifetimeEndBlock = CGF.EHStack.stable_begin();
336 }
337 }
338
339 RValue Src =
340 EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
341 Dest.isExternallyDestructed()));
342
343 if (!UseTemp)
344 return;
345
346 assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
347 Src.getAggregatePointer(E->getType(), CGF));
348 EmitFinalDestCopy(E->getType(), Src);
349
350 if (!RequiresDestruction && LifetimeStartInst) {
351 // If there's no dtor to run, the copy was the last use of our temporary.
352 // Since we're not guaranteed to be in an ExprWithCleanups, clean up
353 // eagerly.
354 CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
355 CGF.EmitLifetimeEnd(RetAddr.getBasePointer());
356 }
357}
358
359/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
360void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
361 assert(src.isAggregate() && "value must be aggregate value!");
362 LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
363 EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
364}
365
366/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
367void AggExprEmitter::EmitFinalDestCopy(
368 QualType type, const LValue &src,
369 CodeGenFunction::ExprValueKind SrcValueKind) {
370 // If Dest is ignored, then we're evaluating an aggregate expression
371 // in a context that doesn't care about the result. Note that loads
372 // from volatile l-values force the existence of a non-ignored
373 // destination.
374 if (Dest.isIgnored())
375 return;
376
377 // Copy non-trivial C structs here.
378 LValue DstLV = CGF.MakeAddrLValue(
379 Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
380
381 if (SrcValueKind == CodeGenFunction::EVK_RValue) {
382 if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
383 if (Dest.isPotentiallyAliased())
384 CGF.callCStructMoveAssignmentOperator(DstLV, src);
385 else
386 CGF.callCStructMoveConstructor(DstLV, src);
387 return;
388 }
389 } else {
390 if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
391 if (Dest.isPotentiallyAliased())
392 CGF.callCStructCopyAssignmentOperator(DstLV, src);
393 else
394 CGF.callCStructCopyConstructor(DstLV, src);
395 return;
396 }
397 }
398
399 AggValueSlot srcAgg = AggValueSlot::forLValue(
402 EmitCopy(type, Dest, srcAgg);
403}
404
405/// Perform a copy from the source into the destination.
406///
407/// \param type - the type of the aggregate being copied; qualifiers are
408/// ignored
409void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
410 const AggValueSlot &src) {
411 if (dest.requiresGCollection()) {
412 CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
413 llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
415 src.getAddress(), size);
416 return;
417 }
418
419 // If the result of the assignment is used, copy the LHS there also.
420 // It's volatile if either side is. Use the minimum alignment of
421 // the two sides.
422 LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
423 LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
424 CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
425 dest.isVolatile() || src.isVolatile());
426}
427
428/// Emit the initializer for a std::initializer_list initialized with a
429/// real initializer list.
430void AggExprEmitter::VisitCXXStdInitializerListExpr(
431 CXXStdInitializerListExpr *E) {
432 // Emit an array containing the elements. The array is externally destructed
433 // if the std::initializer_list object is.
434 ASTContext &Ctx = CGF.getContext();
435 LValue Array = CGF.EmitLValue(E->getSubExpr());
436 assert(Array.isSimple() && "initializer_list array not a simple lvalue");
437 Address ArrayPtr = Array.getAddress();
438
439 const ConstantArrayType *ArrayType =
441 assert(ArrayType && "std::initializer_list constructed from non-array");
442
443 auto *Record = E->getType()->castAsRecordDecl();
444 RecordDecl::field_iterator Field = Record->field_begin();
445 assert(Field != Record->field_end() &&
446 Ctx.hasSameType(Field->getType()->getPointeeType(),
447 ArrayType->getElementType()) &&
448 "Expected std::initializer_list first field to be const E *");
449
450 // Start pointer.
451 AggValueSlot Dest = EnsureSlot(E->getType());
452 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
453 LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
454 llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
455 CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
456 ++Field;
457 assert(Field != Record->field_end() &&
458 "Expected std::initializer_list to have two fields");
459
460 llvm::Value *Size = Builder.getInt(ArrayType->getSize());
461 LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
462 if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
463 // Length.
464 CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
465
466 } else {
467 // End pointer.
468 assert(Field->getType()->isPointerType() &&
469 Ctx.hasSameType(Field->getType()->getPointeeType(),
470 ArrayType->getElementType()) &&
471 "Expected std::initializer_list second field to be const E *");
472 llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
473 llvm::Value *IdxEnd[] = {Zero, Size};
474 llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
475 ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
476 "arrayend");
477 CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
478 }
479
480 assert(++Field == Record->field_end() &&
481 "Expected std::initializer_list to only have two fields");
482}
483
484/// Determine if E is a trivial array filler, that is, one that is
485/// equivalent to zero-initialization.
486static bool isTrivialFiller(Expr *E) {
487 if (!E)
488 return true;
489
491 return true;
492
493 if (auto *ILE = dyn_cast<InitListExpr>(E)) {
494 if (ILE->getNumInits())
495 return false;
496 return isTrivialFiller(ILE->getArrayFiller());
497 }
498
499 if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
500 return Cons->getConstructor()->isDefaultConstructor() &&
501 Cons->getConstructor()->isTrivial();
502
503 // FIXME: Are there other cases where we can avoid emitting an initializer?
504 return false;
505}
506
507// emit an elementwise cast where the RHS is a scalar or vector
508// or emit an aggregate splat cast
510 LValue DestVal,
511 llvm::Value *SrcVal,
512 QualType SrcTy,
513 SourceLocation Loc) {
514 // Flatten our destination
515 SmallVector<LValue, 16> StoreList;
516 CGF.FlattenAccessAndTypeLValue(DestVal, StoreList);
517
518 bool isVector = false;
519 if (auto *VT = SrcTy->getAs<VectorType>()) {
520 isVector = true;
521 SrcTy = VT->getElementType();
522 assert(StoreList.size() <= VT->getNumElements() &&
523 "Cannot perform HLSL flat cast when vector source \
524 object has less elements than flattened destination \
525 object.");
526 }
527
528 for (unsigned I = 0, Size = StoreList.size(); I < Size; I++) {
529 LValue DestLVal = StoreList[I];
530 llvm::Value *Load =
531 isVector ? CGF.Builder.CreateExtractElement(SrcVal, I, "vec.load")
532 : SrcVal;
533 llvm::Value *Cast =
534 CGF.EmitScalarConversion(Load, SrcTy, DestLVal.getType(), Loc);
535 CGF.EmitStoreThroughLValue(RValue::get(Cast), DestLVal);
536 }
537}
538
539// emit a flat cast where the RHS is an aggregate
540static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal,
541 LValue SrcVal, SourceLocation Loc) {
542 // Flatten our destination
543 SmallVector<LValue, 16> StoreList;
544 CGF.FlattenAccessAndTypeLValue(DestVal, StoreList);
545 // Flatten our src
547 CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
548
549 assert(StoreList.size() <= LoadList.size() &&
550 "Cannot perform HLSL elementwise cast when flattened source object \
551 has less elements than flattened destination object.");
552 // apply casts to what we load from LoadList
553 // and store result in Dest
554 for (unsigned I = 0, E = StoreList.size(); I < E; I++) {
555 LValue DestLVal = StoreList[I];
556 LValue SrcLVal = LoadList[I];
557 RValue RVal = CGF.EmitLoadOfLValue(SrcLVal, Loc);
558 assert(RVal.isScalar() && "All flattened source values should be scalars");
559 llvm::Value *Val = RVal.getScalarVal();
560 llvm::Value *Cast = CGF.EmitScalarConversion(Val, SrcLVal.getType(),
561 DestLVal.getType(), Loc);
562 CGF.EmitStoreThroughLValue(RValue::get(Cast), DestLVal);
563 }
564}
565
566/// Emit initialization of an array from an initializer list. ExprToVisit must
567/// be either an InitListEpxr a CXXParenInitListExpr.
568void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
569 QualType ArrayQTy, Expr *ExprToVisit,
570 ArrayRef<Expr *> Args, Expr *ArrayFiller) {
571 uint64_t NumInitElements = Args.size();
572
573 uint64_t NumArrayElements = AType->getNumElements();
574 for (const auto *Init : Args) {
575 if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
576 NumInitElements += Embed->getDataElementCount() - 1;
577 if (NumInitElements > NumArrayElements) {
578 NumInitElements = NumArrayElements;
579 break;
580 }
581 }
582 }
583
584 assert(NumInitElements <= NumArrayElements);
585
586 QualType elementType =
587 CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
588 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
589 CharUnits elementAlign =
590 DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
591 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
592
593 // Consider initializing the array by copying from a global. For this to be
594 // more efficient than per-element initialization, the size of the elements
595 // with explicit initializers should be large enough.
596 if (NumInitElements * elementSize.getQuantity() > 16 &&
597 elementType.isTriviallyCopyableType(CGF.getContext())) {
598 CodeGen::CodeGenModule &CGM = CGF.CGM;
599 ConstantEmitter Emitter(CGF);
600 QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
601 CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
603 LangAS AS = GVArrayQTy.getAddressSpace();
604 if (llvm::Constant *C =
605 Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
606 auto GV = new llvm::GlobalVariable(
607 CGM.getModule(), C->getType(),
608 /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
609 "constinit",
610 /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
612 Emitter.finalize(GV);
613 CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
614 GV->setAlignment(Align.getAsAlign());
615 Address GVAddr(GV, GV->getValueType(), Align);
616 EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
617 return;
618 }
619 }
620
621 // Exception safety requires us to destroy all the
622 // already-constructed members if an initializer throws.
623 // For that, we'll need an EH cleanup.
624 QualType::DestructionKind dtorKind = elementType.isDestructedType();
625 Address endOfInit = Address::invalid();
626 CodeGenFunction::CleanupDeactivationScope deactivation(CGF);
627
628 llvm::Value *begin = DestPtr.emitRawPointer(CGF);
629 if (dtorKind) {
630 CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
631 // In principle we could tell the cleanup where we are more
632 // directly, but the control flow can get so varied here that it
633 // would actually be quite complex. Therefore we go through an
634 // alloca.
635 llvm::Instruction *dominatingIP =
636 Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
637 endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
638 "arrayinit.endOfInit");
639 Builder.CreateStore(begin, endOfInit);
640 CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
641 elementAlign,
642 CGF.getDestroyer(dtorKind));
644 .AddAuxAllocas(allocaTracker.Take());
645
647 {CGF.EHStack.stable_begin(), dominatingIP});
648 }
649
650 llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
651
652 auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
653 llvm::Value *element = begin;
654 if (ArrayIndex > 0) {
655 if (CGF.getLangOpts().EmitLogicalPointer)
656 element = Builder.CreateStructuredGEP(
657 AType, begin, llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex),
658 "arrayinit.element");
659 else
660 element = Builder.CreateInBoundsGEP(
661 llvmElementType, begin,
662 llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex),
663 "arrayinit.element");
664
665 // Tell the cleanup that it needs to destroy up to this
666 // element. TODO: some of these stores can be trivially
667 // observed to be unnecessary.
668 if (endOfInit.isValid())
669 Builder.CreateStore(element, endOfInit);
670 }
671
672 LValue elementLV = CGF.MakeAddrLValue(
673 Address(element, llvmElementType, elementAlign), elementType);
674 EmitInitializationToLValue(Init, elementLV);
675 return true;
676 };
677
678 unsigned ArrayIndex = 0;
679 // Emit the explicit initializers.
680 for (uint64_t i = 0; i != NumInitElements; ++i) {
681 if (ArrayIndex >= NumInitElements)
682 break;
683 if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
684 EmbedS->doForEachDataElement(Emit, ArrayIndex);
685 } else {
686 Emit(Args[i], ArrayIndex);
687 ArrayIndex++;
688 }
689 }
690
691 // Check whether there's a non-trivial array-fill expression.
692 bool hasTrivialFiller = isTrivialFiller(ArrayFiller);
693
694 // Any remaining elements need to be zero-initialized, possibly
695 // using the filler expression. We can skip this if the we're
696 // emitting to zeroed memory.
697 if (NumInitElements != NumArrayElements &&
698 !(Dest.isZeroed() && hasTrivialFiller &&
699 CGF.getTypes().isZeroInitializable(elementType))) {
700
701 // Use an actual loop. This is basically
702 // do { *array++ = filler; } while (array != end);
703
704 // Advance to the start of the rest of the array.
705 llvm::Value *element = begin;
706 if (NumInitElements) {
707 element = Builder.CreateInBoundsGEP(
708 llvmElementType, element,
709 llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
710 "arrayinit.start");
711 if (endOfInit.isValid())
712 Builder.CreateStore(element, endOfInit);
713 }
714
715 // Compute the end of the array.
716 llvm::Value *end = Builder.CreateInBoundsGEP(
717 llvmElementType, begin,
718 llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");
719
720 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
721 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
722
723 // Jump into the body.
724 CGF.EmitBlock(bodyBB);
725 llvm::PHINode *currentElement =
726 Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
727 currentElement->addIncoming(element, entryBB);
728
729 // Emit the actual filler expression.
730 {
731 // C++1z [class.temporary]p5:
732 // when a default constructor is called to initialize an element of
733 // an array with no corresponding initializer [...] the destruction of
734 // every temporary created in a default argument is sequenced before
735 // the construction of the next array element, if any
736 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
737 LValue elementLV = CGF.MakeAddrLValue(
738 Address(currentElement, llvmElementType, elementAlign), elementType);
739 if (ArrayFiller)
740 EmitInitializationToLValue(ArrayFiller, elementLV);
741 else
742 EmitNullInitializationToLValue(elementLV);
743 }
744
745 // Move on to the next element.
746 llvm::Value *nextElement = Builder.CreateInBoundsGEP(
747 llvmElementType, currentElement, one, "arrayinit.next");
748
749 // Tell the EH cleanup that we finished with the last element.
750 if (endOfInit.isValid())
751 Builder.CreateStore(nextElement, endOfInit);
752
753 // Leave the loop if we're done.
754 llvm::Value *done =
755 Builder.CreateICmpEQ(nextElement, end, "arrayinit.done");
756 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
757 Builder.CreateCondBr(done, endBB, bodyBB);
758 currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
759
760 CGF.EmitBlock(endBB);
761 }
762}
763
764//===----------------------------------------------------------------------===//
765// Visitor Methods
766//===----------------------------------------------------------------------===//
767
768void AggExprEmitter::VisitMaterializeTemporaryExpr(
769 MaterializeTemporaryExpr *E) {
770 Visit(E->getSubExpr());
771}
772
773void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
774 // If this is a unique OVE, just visit its source expression.
775 if (e->isUnique())
776 Visit(e->getSourceExpr());
777 else
778 EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
779}
780
781void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
782 if (Dest.isPotentiallyAliased()) {
783 // Just emit a load of the lvalue + a copy, because our compound literal
784 // might alias the destination.
785 EmitAggLoadOfLValue(E);
786 return;
787 }
788
789 AggValueSlot Slot = EnsureSlot(E->getType());
790
791 // Block-scope compound literals are destroyed at the end of the enclosing
792 // scope in C.
793 bool Destruct =
794 !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
795 if (Destruct)
797
798 CGF.EmitAggExpr(E->getInitializer(), Slot);
799
800 if (Destruct)
803 CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
804 CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
805}
806
807/// Attempt to look through various unimportant expressions to find a
808/// cast of the given kind.
809static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
810 op = op->IgnoreParenNoopCasts(ctx);
811 if (auto castE = dyn_cast<CastExpr>(op)) {
812 if (castE->getCastKind() == kind)
813 return castE->getSubExpr();
814 }
815 return nullptr;
816}
817
/// Emit an aggregate-typed cast into the destination slot. Only the cast
/// kinds that can legitimately produce an aggregate are handled here
/// (dynamic_cast, GCC union casts, bit casts, atomic conversions, HLSL
/// casts, no-ops); every scalar-only cast kind falls into the
/// llvm_unreachable at the bottom.
/// NOTE(review): several continuation lines were elided in this rendering
/// (source lines 826, 841, 856, 866, 889, 918, 926-927, 953, 955, 960,
/// 1008); restore them from upstream CGExprAgg.cpp before compiling.
818void AggExprEmitter::VisitCastExpr(CastExpr *E) {
819 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
820 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
821 switch (E->getCastKind()) {
822 case CK_Dynamic: {
823 // FIXME: Can this actually happen? We have no test coverage for it.
824 assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
825 LValue LV =
// NOTE(review): LV's initializer (line 826) is elided here — presumably
// CGF.EmitLValue(E->getSubExpr()); confirm against upstream.
827 // FIXME: Do we also need to handle property references here?
828 if (LV.isSimple())
829 CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
830 else
831 CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
832
833 if (!Dest.isIgnored())
834 CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
835 break;
836 }
837
838 case CK_ToUnion: {
839 // Evaluate even if the destination is ignored.
840 if (Dest.isIgnored()) {
842 /*ignoreResult=*/true);
843 break;
844 }
845
846 // GCC union extension
847 QualType Ty = E->getSubExpr()->getType();
848 Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
849 EmitInitializationToLValue(E->getSubExpr(),
850 CGF.MakeAddrLValue(CastPtr, Ty));
851 break;
852 }
853
854 case CK_LValueToRValueBitCast: {
855 if (Dest.isIgnored()) {
857 /*ignoreResult=*/true);
858 break;
859 }
860
// std::bit_cast-style conversion: copy the object representation byte-wise.
861 LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
862 Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
863 Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
864 llvm::Value *SizeVal = llvm::ConstantInt::get(
865 CGF.SizeTy,
867 Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
868 break;
869 }
870
871 case CK_DerivedToBase:
872 case CK_BaseToDerived:
873 case CK_UncheckedDerivedToBase: {
874 llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
875 "should have been unpacked before we got here");
876 }
877
878 case CK_NonAtomicToAtomic:
879 case CK_AtomicToNonAtomic: {
880 bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
881
882 // Determine the atomic and value types.
883 QualType atomicType = E->getSubExpr()->getType();
884 QualType valueType = E->getType();
885 if (isToAtomic)
886 std::swap(atomicType, valueType);
887
888 assert(atomicType->isAtomicType());
// NOTE(review): the head of a second assert (line 889) is elided; it
// apparently checks that valueType matches the atomic's value type.
890 valueType, atomicType->castAs<AtomicType>()->getValueType()));
891
892 // Just recurse normally if we're ignoring the result or the
893 // atomic type doesn't change representation.
894 if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
895 return Visit(E->getSubExpr());
896 }
897
898 CastKind peepholeTarget =
899 (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
900
901 // These two cases are reverses of each other; try to peephole them.
902 if (Expr *op =
903 findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
904 assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
905 E->getType()) &&
906 "peephole significantly changed types?");
907 return Visit(op);
908 }
909
910 // If we're converting an r-value of non-atomic type to an r-value
911 // of atomic type, just emit directly into the relevant sub-object.
912 if (isToAtomic) {
913 AggValueSlot valueDest = Dest;
914 if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
915 // Zero-initialize. (Strictly speaking, we only need to initialize
916 // the padding at the end, but this is simpler.)
917 if (!Dest.isZeroed())
919
920 // Build a GEP to refer to the subobject.
921 Address valueAddr =
922 CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
923 valueDest = AggValueSlot::forAddr(
924 valueAddr, valueDest.getQualifiers(),
925 valueDest.isExternallyDestructed(), valueDest.requiresGCollection(),
928 }
929
930 CGF.EmitAggExpr(E->getSubExpr(), valueDest);
931 return;
932 }
933
934 // Otherwise, we're converting an atomic type to a non-atomic type.
935 // Make an atomic temporary, emit into that, and then copy the value out.
936 AggValueSlot atomicSlot =
937 CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
938 CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
939
940 Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
941 RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
942 return EmitFinalDestCopy(valueType, rvalue);
943 }
944 case CK_AddressSpaceConversion:
945 return Visit(E->getSubExpr());
946
947 case CK_LValueToRValue:
948 // If we're loading from a volatile type, force the destination
949 // into existence.
950 if (E->getSubExpr()->getType().isVolatileQualified()) {
951 bool Destruct =
952 !Dest.isExternallyDestructed() &&
954 if (Destruct)
956 EnsureDest(E->getType());
957 Visit(E->getSubExpr());
958
959 if (Destruct)
961 E->getType());
962
963 return;
964 }
965
966 [[fallthrough]];
967
968 case CK_HLSLArrayRValue:
969 Visit(E->getSubExpr());
970 break;
971 case CK_HLSLAggregateSplatCast: {
972 Expr *Src = E->getSubExpr();
973 QualType SrcTy = Src->getType();
974 RValue RV = CGF.EmitAnyExpr(Src);
975 LValue DestLVal = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
976 SourceLocation Loc = E->getExprLoc();
977
978 assert(RV.isScalar() && SrcTy->isScalarType() &&
979 "RHS of HLSL splat cast must be a scalar.");
980 llvm::Value *SrcVal = RV.getScalarVal();
981 EmitHLSLScalarElementwiseAndSplatCasts(CGF, DestLVal, SrcVal, SrcTy, Loc);
982 break;
983 }
984 case CK_HLSLElementwiseCast: {
985 Expr *Src = E->getSubExpr();
986 QualType SrcTy = Src->getType();
987 RValue RV = CGF.EmitAnyExpr(Src);
988 LValue DestLVal = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
989 SourceLocation Loc = E->getExprLoc();
990
991 if (RV.isScalar()) {
992 llvm::Value *SrcVal = RV.getScalarVal();
993 assert(SrcTy->isVectorType() &&
994 "HLSL Elementwise cast doesn't handle splatting.");
995 EmitHLSLScalarElementwiseAndSplatCasts(CGF, DestLVal, SrcVal, SrcTy, Loc);
996 } else {
997 assert(RV.isAggregate() &&
998 "Can't perform HLSL Aggregate cast on a complex type.");
999 Address SrcVal = RV.getAggregateAddress();
1000 EmitHLSLElementwiseCast(CGF, DestLVal, CGF.MakeAddrLValue(SrcVal, SrcTy),
1001 Loc);
1002 }
1003 break;
1004 }
1005 case CK_NoOp:
1006 case CK_UserDefinedConversion:
1007 case CK_ConstructorConversion:
1009 E->getType()) &&
1010 "Implicit cast types must be compatible");
1011 Visit(E->getSubExpr());
1012 break;
1013
1014 case CK_LValueBitCast:
1015 llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
1016
1017 case CK_Dependent:
1018 case CK_BitCast:
1019 case CK_ArrayToPointerDecay:
1020 case CK_FunctionToPointerDecay:
1021 case CK_NullToPointer:
1022 case CK_NullToMemberPointer:
1023 case CK_BaseToDerivedMemberPointer:
1024 case CK_DerivedToBaseMemberPointer:
1025 case CK_MemberPointerToBoolean:
1026 case CK_ReinterpretMemberPointer:
1027 case CK_IntegralToPointer:
1028 case CK_PointerToIntegral:
1029 case CK_PointerToBoolean:
1030 case CK_ToVoid:
1031 case CK_VectorSplat:
1032 case CK_IntegralCast:
1033 case CK_BooleanToSignedIntegral:
1034 case CK_IntegralToBoolean:
1035 case CK_IntegralToFloating:
1036 case CK_FloatingToIntegral:
1037 case CK_FloatingToBoolean:
1038 case CK_FloatingCast:
1039 case CK_CPointerToObjCPointerCast:
1040 case CK_BlockPointerToObjCPointerCast:
1041 case CK_AnyPointerToBlockPointerCast:
1042 case CK_ObjCObjectLValueCast:
1043 case CK_FloatingRealToComplex:
1044 case CK_FloatingComplexToReal:
1045 case CK_FloatingComplexToBoolean:
1046 case CK_FloatingComplexCast:
1047 case CK_FloatingComplexToIntegralComplex:
1048 case CK_IntegralRealToComplex:
1049 case CK_IntegralComplexToReal:
1050 case CK_IntegralComplexToBoolean:
1051 case CK_IntegralComplexCast:
1052 case CK_IntegralComplexToFloatingComplex:
1053 case CK_ARCProduceObject:
1054 case CK_ARCConsumeObject:
1055 case CK_ARCReclaimReturnedObject:
1056 case CK_ARCExtendBlockObject:
1057 case CK_CopyAndAutoreleaseBlockObject:
1058 case CK_BuiltinFnToFnPtr:
1059 case CK_ZeroToOCLOpaqueType:
1060 case CK_MatrixCast:
1061 case CK_HLSLVectorTruncation:
1062 case CK_HLSLMatrixTruncation:
1063 case CK_IntToOCLSampler:
1064 case CK_FloatingToFixedPoint:
1065 case CK_FixedPointToFloating:
1066 case CK_FixedPointCast:
1067 case CK_FixedPointToBoolean:
1068 case CK_FixedPointToIntegral:
1069 case CK_IntegralToFixedPoint:
1070 llvm_unreachable("cast kind invalid for aggregate types");
1071 }
1072}
1073
1074void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
1075 if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
1076 EmitAggLoadOfLValue(E);
1077 return;
1078 }
1079
1080 withReturnValueSlot(
1081 E, [&](ReturnValueSlot Slot) { return CGF.EmitCallExpr(E, Slot); });
1082}
1083
1084void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
1085 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
1086 return CGF.EmitObjCMessageExpr(E, Slot);
1087 });
1088}
1089
1090void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
1091 CGF.EmitIgnoredExpr(E->getLHS());
1092 Visit(E->getRHS());
1093}
1094
1095void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
1096 CodeGenFunction::StmtExprEvaluation eval(CGF);
1097 CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
1098}
1099
1105
/// Emit one scalar comparison (less / greater / equal, per Kind) of LHS and
/// RHS, selecting the floating / signed / unsigned predicate from the
/// operand type. Complex operands are compared component-wise by the
/// caller; member pointers are delegated to the C++ ABI. Used by
/// VisitBinCmp to assemble a three-way comparison result.
1106static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
1107 const BinaryOperator *E, llvm::Value *LHS,
1108 llvm::Value *RHS, CompareKind Kind,
1109 const char *NameSuffix = "") {
1110 QualType ArgTy = E->getLHS()->getType();
1111 if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
1112 ArgTy = CT->getElementType();
1113
1114 if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
1115 assert(Kind == CK_Equal &&
1116 "member pointers may only be compared for equality");
// NOTE(review): the ABI call head (line 1117) is elided in this rendering —
// presumably a return of CGF.CGM.getCXXABI().EmitMemberPointerComparison(
// ...); confirm against upstream.
1118 CGF, LHS, RHS, MPT, /*IsInequality*/ false);
1119 }
1120
1121 // Compute the comparison instructions for the specified comparison kind.
1122 struct CmpInstInfo {
1123 const char *Name;
1124 llvm::CmpInst::Predicate FCmp;
1125 llvm::CmpInst::Predicate SCmp;
1126 llvm::CmpInst::Predicate UCmp;
1127 };
// Immediately-invoked lambda selects the instruction name and the three
// candidate predicates for the requested comparison kind.
1128 CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
1129 using FI = llvm::FCmpInst;
1130 using II = llvm::ICmpInst;
1131 switch (Kind) {
1132 case CK_Less:
1133 return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
1134 case CK_Greater:
1135 return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
1136 case CK_Equal:
1137 return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
1138 }
1139 llvm_unreachable("Unrecognised CompareKind enum");
1140 }();
1141
1142 if (ArgTy->hasFloatingRepresentation())
1143 return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
1144 llvm::Twine(InstInfo.Name) + NameSuffix);
1145 if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
1146 auto Inst =
1147 ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
1148 return Builder.CreateICmp(Inst, LHS, RHS,
1149 llvm::Twine(InstInfo.Name) + NameSuffix);
1150 }
1151
1152 llvm_unreachable("unsupported aggregate binary expression should have "
1153 "already been handled");
1154}
1155
/// Emit a three-way comparison (operator<=>) whose result is a comparison
/// category object (std::strong_ordering etc.). The operands are compared
/// with EmitCompare, the matching category constant is chosen with selects,
/// and that constant is stored into the category type's single field in
/// the destination slot.
1156void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
1157 using llvm::BasicBlock;
1158 using llvm::PHINode;
1159 using llvm::Value;
1160 assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
1161 E->getRHS()->getType()));
1162 const ComparisonCategoryInfo &CmpInfo =
// NOTE(review): the initializer (line 1163) is elided in this rendering —
// presumably CGF.getContext().CompCategories.getInfoForType(E->getType());
// confirm against upstream.
1164 assert(CmpInfo.Record->isTriviallyCopyable() &&
1165 "cannot copy non-trivially copyable aggregate");
1166
1167 QualType ArgTy = E->getLHS()->getType();
1168
1169 if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
1170 !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
1171 !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
1172 return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
1173 }
1174 bool IsComplex = ArgTy->isAnyComplexType();
1175
1176 // Evaluate the operands to the expression and extract their values.
// Returns {value, nullptr} for scalars/aggregates, {real, imag} for complex.
1177 auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
1178 RValue RV = CGF.EmitAnyExpr(E);
1179 if (RV.isScalar())
1180 return {RV.getScalarVal(), nullptr};
1181 if (RV.isAggregate())
1182 return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
1183 assert(RV.isComplex());
1184 return RV.getComplexVal();
1185 };
1186 auto LHSValues = EmitOperand(E->getLHS()),
1187 RHSValues = EmitOperand(E->getRHS());
1188
// For complex operands only equality is meaningful: AND the real-part and
// imaginary-part comparisons together.
1189 auto EmitCmp = [&](CompareKind K) {
1190 Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
1191 K, IsComplex ? ".r" : "");
1192 if (!IsComplex)
1193 return Cmp;
1194 assert(K == CompareKind::CK_Equal);
1195 Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
1196 RHSValues.second, K, ".i");
1197 return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
1198 };
1199 auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
1200 return Builder.getInt(VInfo->getIntValue());
1201 };
1202
1203 Value *Select;
1204 if (ArgTy->isNullPtrType()) {
1205 Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
1206 } else if (!CmpInfo.isPartial()) {
// Total ordering: equal ? equiv : (less ? less : greater).
1207 Value *SelectOne =
1208 Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
1209 EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
1210 Select = Builder.CreateSelect(EmitCmp(CK_Equal),
1211 EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1212 SelectOne, "sel.eq");
1213 } else {
// Partial ordering: also account for the unordered result.
1214 Value *SelectEq = Builder.CreateSelect(
1215 EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1216 EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
1217 Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
1218 EmitCmpRes(CmpInfo.getGreater()),
1219 SelectEq, "sel.gt");
1220 Select = Builder.CreateSelect(
1221 EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
1222 }
1223 // Create the return value in the destination slot.
1224 EnsureDest(E->getType());
1225 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1226
1227 // Emit the address of the first (and only) field in the comparison category
1228 // type, and initialize it from the constant integer value selected above.
1229 LValue FieldLV = CGF.EmitLValueForFieldInitialization(
1230 DestLV, *CmpInfo.Record->field_begin());
1231 CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);
1232
1233 // All done! The result is in the Dest slot.
1234}
1235
1236void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
1237 if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
1238 VisitPointerToDataMemberBinaryOperator(E);
1239 else
1240 CGF.ErrorUnsupported(E, "aggregate binary expression");
1241}
1242
1243void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
1244 const BinaryOperator *E) {
1245 LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
1246 EmitFinalDestCopy(E->getType(), LV);
1247}
1248
1249/// Is the value of the given expression possibly a reference to or
1250/// into a __block variable?
1251static bool isBlockVarRef(const Expr *E) {
1252 // Make sure we look through parens.
1253 E = E->IgnoreParens();
1254
1255 // Check for a direct reference to a __block variable.
1256 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
1257 const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
1258 return (var && var->hasAttr<BlocksAttr>());
1259 }
1260
1261 // More complicated stuff.
1262
1263 // Binary operators.
1264 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
1265 // For an assignment or pointer-to-member operation, just care
1266 // about the LHS.
1267 if (op->isAssignmentOp() || op->isPtrMemOp())
1268 return isBlockVarRef(op->getLHS());
1269
1270 // For a comma, just care about the RHS.
1271 if (op->getOpcode() == BO_Comma)
1272 return isBlockVarRef(op->getRHS());
1273
1274 // FIXME: pointer arithmetic?
1275 return false;
1276
1277 // Check both sides of a conditional operator.
1278 } else if (const AbstractConditionalOperator *op =
1279 dyn_cast<AbstractConditionalOperator>(E)) {
1280 return isBlockVarRef(op->getTrueExpr()) ||
1281 isBlockVarRef(op->getFalseExpr());
1282
1283 // OVEs are required to support BinaryConditionalOperators.
1284 } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(E)) {
1285 if (const Expr *src = op->getSourceExpr())
1286 return isBlockVarRef(src);
1287
1288 // Casts are necessary to get things like (*(int*)&var) = foo().
1289 // We don't really care about the kind of cast here, except
1290 // we don't want to look through l2r casts, because it's okay
1291 // to get the *value* in a __block variable.
1292 } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
1293 if (cast->getCastKind() == CK_LValueToRValue)
1294 return false;
1295 return isBlockVarRef(cast->getSubExpr());
1296
1297 // Handle unary operators. Again, just aggressively look through
1298 // it, ignoring the operation.
1299 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
1300 return isBlockVarRef(uop->getSubExpr());
1301
1302 // Look into the base of a field access.
1303 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
1304 return isBlockVarRef(mem->getBase());
1305
1306 // Look into the base of a subscript.
1307 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
1308 return isBlockVarRef(sub->getBase());
1309 }
1310
1311 return false;
1312}
1313
/// Emit an aggregate assignment: evaluate the RHS directly into the LHS's
/// storage when possible, handling the __block-variable ordering hazard and
/// atomic LHS types specially, then copy into Dest if the result is used.
/// NOTE(review): several continuation lines are elided in this rendering
/// (source lines 1337, 1343, 1345-1346, 1356, 1366, 1377-1378); restore
/// them from upstream CGExprAgg.cpp before compiling.
1314void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1315 ApplyAtomGroup Grp(CGF.getDebugInfo());
1316 // For an assignment to work, the value on the right has
1317 // to be compatible with the value on the left.
1318 assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
1319 E->getRHS()->getType()) &&
1320 "Invalid assignment");
1321
1322 // If the LHS might be a __block variable, and the RHS can
1323 // potentially cause a block copy, we need to evaluate the RHS first
1324 // so that the assignment goes the right place.
1325 // This is pretty semantically fragile.
1326 if (isBlockVarRef(E->getLHS()) &&
1327 E->getRHS()->HasSideEffects(CGF.getContext())) {
1328 // Ensure that we have a destination, and evaluate the RHS into that.
1329 EnsureDest(E->getRHS()->getType());
1330 Visit(E->getRHS());
1331
1332 // Now emit the LHS and copy into it.
1333 LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
1334
1335 // That copy is an atomic copy if the LHS is atomic.
1336 if (LHS.getType()->isAtomicType() ||
1338 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1339 return;
1340 }
1341
1342 EmitCopy(E->getLHS()->getType(),
1344 needsGC(E->getLHS()->getType()),
1347 Dest);
1348 return;
1349 }
1350
1351 LValue LHS = CGF.EmitLValue(E->getLHS());
1352
1353 // If we have an atomic type, evaluate into the destination and then
1354 // do an atomic copy.
1355 if (LHS.getType()->isAtomicType() ||
1357 EnsureDest(E->getRHS()->getType());
1358 Visit(E->getRHS());
1359 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1360 return;
1361 }
1362
1363 // Codegen the RHS so that it stores directly into the LHS.
1364 AggValueSlot LHSSlot = AggValueSlot::forLValue(
1365 LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
1367 // A non-volatile aggregate destination might have volatile member.
1368 if (!LHSSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType()))
1369 LHSSlot.setVolatile(true);
1370
1371 CGF.EmitAggExpr(E->getRHS(), LHSSlot);
1372
1373 // Copy into the destination if the assignment isn't ignored.
1374 EmitFinalDestCopy(E->getType(), LHS);
1375
1376 if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
1379 E->getType());
1380}
1381
/// Emit an aggregate conditional (?:): branch on the condition and emit
/// whichever arm is taken into the shared destination slot, restoring the
/// slot's externally-destructed flag between arms so the second emission
/// does not assume the first already registered a destructor.
/// NOTE(review): source lines 1399, 1405, 1420 and 1425 are elided in this
/// rendering (1405/1420 presumably increment profile counters); restore
/// from upstream before compiling.
1382void AggExprEmitter::VisitAbstractConditionalOperator(
1383 const AbstractConditionalOperator *E) {
1384 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1385 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1386 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
1387
1388 // Bind the common expression if necessary.
1389 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
1390
1391 CodeGenFunction::ConditionalEvaluation eval(CGF);
1392 CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
1393 CGF.getProfileCount(E));
1394
1395 // Save whether the destination's lifetime is externally managed.
1396 bool isExternallyDestructed = Dest.isExternallyDestructed();
1397 bool destructNonTrivialCStruct =
1398 !isExternallyDestructed &&
1400 isExternallyDestructed |= destructNonTrivialCStruct;
1401 Dest.setExternallyDestructed(isExternallyDestructed);
1402
1403 eval.begin(CGF);
1404 CGF.EmitBlock(LHSBlock);
1406 Visit(E->getTrueExpr());
1407 eval.end(CGF);
1408
1409 assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
1410 CGF.Builder.CreateBr(ContBlock);
1411
1412 // If the result of an agg expression is unused, then the emission
1413 // of the LHS might need to create a destination slot. That's fine
1414 // with us, and we can safely emit the RHS into the same slot, but
1415 // we shouldn't claim that it's already being destructed.
1416 Dest.setExternallyDestructed(isExternallyDestructed)
1417
1418 eval.begin(CGF);
1419 CGF.EmitBlock(RHSBlock);
1421 Visit(E->getFalseExpr());
1422 eval.end(CGF);
1423
1424 if (destructNonTrivialCStruct)
1426 E->getType());
1427
1428 CGF.EmitBlock(ContBlock);
1429}
1430
1431void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
1432 Visit(CE->getChosenSubExpr());
1433}
1434
1435void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1436 Address ArgValue = Address::invalid();
1437 CGF.EmitVAArg(VE, ArgValue, Dest);
1438
1439 // If EmitVAArg fails, emit an error.
1440 if (!ArgValue.isValid()) {
1441 CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
1442 return;
1443 }
1444}
1445
/// Emit a C++ bound temporary: evaluate the subexpression into the slot
/// and, unless the slot's lifetime is already externally managed, register
/// the temporary's destructor afterwards.
1446void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1447 // Ensure that we have a slot, but if we already do, remember
1448 // whether it was externally destructed.
1449 bool wasExternallyDestructed = Dest.isExternallyDestructed();
1450 EnsureDest(E->getType());
1451
1452 // We're going to push a destructor if there isn't already one.
// NOTE(review): a statement (source line 1453) is elided in this rendering
// — presumably Dest.setExternallyDestructed(); confirm against upstream.
1454
1455 Visit(E->getSubExpr());
1456
1457 // Push that destructor we promised.
1458 if (!wasExternallyDestructed)
1459 CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1460}
1461
1462void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1463 AggValueSlot Slot = EnsureSlot(E->getType());
1464 CGF.EmitCXXConstructExpr(E, Slot);
1465}
1466
/// Emit an inherited-constructor initialization into the slot.
1467void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1468 const CXXInheritedCtorInitExpr *E) {
1469 AggValueSlot Slot = EnsureSlot(E->getType());
// NOTE(review): the call head (source line 1470) is elided in this
// rendering — presumably CGF.EmitInheritedCXXConstructorCall(
// E->getConstructor(), ...); confirm against upstream.
1471 Slot.getAddress(),
1472 E->inheritedFromVBase(), E);
1473}
1474
/// Emit a lambda-expression closure object: initialize each field of the
/// lambda's closure class from the corresponding capture initializer,
/// handling captured VLA types specially and registering destructors for
/// captures whose types need destruction.
1475void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1476 AggValueSlot Slot = EnsureSlot(E->getType());
1477 LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1478
1479 // We'll need to enter cleanup scopes in case any of the element
1480 // initializers throws an exception or contains branch out of the expressions.
1481 CodeGenFunction::CleanupDeactivationScope scope(CGF);
1482
1483 CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
// NOTE(review): the loop header's first line (source line 1484) is elided
// in this rendering — presumably a for-loop over E->capture_init_begin();
// confirm against upstream.
1485 e = E->capture_init_end();
1486 i != e; ++i, ++CurField) {
1487 // Emit initialization
1488 LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1489 if (CurField->hasCapturedVLAType()) {
1490 CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1491 continue;
1492 }
1493
1494 EmitInitializationToLValue(*i, LV);
1495
1496 // Push a destructor if necessary.
1497 if (QualType::DestructionKind DtorKind =
1498 CurField->getType().isDestructedType()) {
1499 assert(LV.isSimple());
1500 if (DtorKind)
// NOTE(review): the destructor-push call head (source line 1501) is elided
// in this rendering; confirm against upstream.
1502 CurField->getType(),
1503 CGF.getDestroyer(DtorKind), false);
1504 }
1505 }
1506}
1507
1508void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1509 CodeGenFunction::RunCleanupsScope cleanups(CGF);
1510 Visit(E->getSubExpr());
1511}
1512
1513void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1514 QualType T = E->getType();
1515 AggValueSlot Slot = EnsureSlot(T);
1516 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1517}
1518
1519void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1520 QualType T = E->getType();
1521 AggValueSlot Slot = EnsureSlot(T);
1522 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1523}
1524
1525/// Determine whether the given cast kind is known to always convert values
1526/// with all zero bits in their value representation to values with all zero
1527/// bits in their value representation.
/// (Used by isSimpleZero, which lets EmitInitializationToLValue skip stores
/// into memory that is already zeroed.)
1528static bool castPreservesZero(const CastExpr *CE) {
1529 switch (CE->getCastKind()) {
1530 // No-ops.
1531 case CK_NoOp:
1532 case CK_UserDefinedConversion:
1533 case CK_ConstructorConversion:
1534 case CK_BitCast:
1535 case CK_ToUnion:
1536 case CK_ToVoid:
1537 // Conversions between (possibly-complex) integral, (possibly-complex)
1538 // floating-point, and bool.
1539 case CK_BooleanToSignedIntegral:
1540 case CK_FloatingCast:
1541 case CK_FloatingComplexCast:
1542 case CK_FloatingComplexToBoolean:
1543 case CK_FloatingComplexToIntegralComplex:
1544 case CK_FloatingComplexToReal:
1545 case CK_FloatingRealToComplex:
1546 case CK_FloatingToBoolean:
1547 case CK_FloatingToIntegral:
1548 case CK_IntegralCast:
1549 case CK_IntegralComplexCast:
1550 case CK_IntegralComplexToBoolean:
1551 case CK_IntegralComplexToFloatingComplex:
1552 case CK_IntegralComplexToReal:
1553 case CK_IntegralRealToComplex:
1554 case CK_IntegralToBoolean:
1555 case CK_IntegralToFloating:
1556 // Reinterpreting integers as pointers and vice versa.
1557 case CK_IntegralToPointer:
1558 case CK_PointerToIntegral:
1559 // Language extensions.
1560 case CK_VectorSplat:
1561 case CK_MatrixCast:
1562 case CK_NonAtomicToAtomic:
1563 case CK_AtomicToNonAtomic:
1564 case CK_HLSLVectorTruncation:
1565 case CK_HLSLMatrixTruncation:
1566 case CK_HLSLElementwiseCast:
1567 case CK_HLSLAggregateSplatCast:
1568 return true;
1569
1570 case CK_BaseToDerivedMemberPointer:
1571 case CK_DerivedToBaseMemberPointer:
1572 case CK_MemberPointerToBoolean:
1573 case CK_NullToMemberPointer:
1574 case CK_ReinterpretMemberPointer:
1575 // FIXME: ABI-dependent.
1576 return false;
1577
1578 case CK_AnyPointerToBlockPointerCast:
1579 case CK_BlockPointerToObjCPointerCast:
1580 case CK_CPointerToObjCPointerCast:
1581 case CK_ObjCObjectLValueCast:
1582 case CK_IntToOCLSampler:
1583 case CK_ZeroToOCLOpaqueType:
1584 // FIXME: Check these.
1585 return false;
1586
1587 case CK_FixedPointCast:
1588 case CK_FixedPointToBoolean:
1589 case CK_FixedPointToFloating:
1590 case CK_FixedPointToIntegral:
1591 case CK_FloatingToFixedPoint:
1592 case CK_IntegralToFixedPoint:
1593 // FIXME: Do all fixed-point types represent zero as all 0 bits?
1594 return false;
1595
1596 case CK_AddressSpaceConversion:
1597 case CK_BaseToDerived:
1598 case CK_DerivedToBase:
1599 case CK_Dynamic:
1600 case CK_NullToPointer:
1601 case CK_PointerToBoolean:
1602 // FIXME: Preserves zeroes only if zero pointers and null pointers have the
1603 // same representation in all involved address spaces.
1604 return false;
1605
1606 case CK_ARCConsumeObject:
1607 case CK_ARCExtendBlockObject:
1608 case CK_ARCProduceObject:
1609 case CK_ARCReclaimReturnedObject:
1610 case CK_CopyAndAutoreleaseBlockObject:
1611 case CK_ArrayToPointerDecay:
1612 case CK_FunctionToPointerDecay:
1613 case CK_BuiltinFnToFnPtr:
1614 case CK_Dependent:
1615 case CK_LValueBitCast:
1616 case CK_LValueToRValue:
1617 case CK_LValueToRValueBitCast:
1618 case CK_UncheckedDerivedToBase:
1619 case CK_HLSLArrayRValue:
1620 return false;
1621 }
1622 llvm_unreachable("Unhandled clang::CastKind enum");
1623}
1624
1625/// isSimpleZero - If emitting this value will obviously just cause a store of
1626/// zero to memory, return true. This can return false if uncertain, so it just
1627/// handles simple cases.
1628static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
// Look through parens and any chain of zero-preserving casts first.
1629 E = E->IgnoreParens();
1630 while (auto *CE = dyn_cast<CastExpr>(E)) {
1631 if (!castPreservesZero(CE))
1632 break;
1633 E = CE->getSubExpr()->IgnoreParens();
1634 }
1635
1636 // 0
1637 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1638 return IL->getValue() == 0;
1639 // +0.0
1640 if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1641 return FL->getValue().isPosZero();
1642 // int()
// NOTE(review): the check guarding this 'return true' (source lines
// 1643-1644) is elided in this rendering — presumably an isa<> test for
// value-initialization expressions; confirm against upstream.
1645 return true;
1646 // (int*)0 - Null pointer expressions.
1647 if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1648 return ICE->getCastKind() == CK_NullToPointer &&
// NOTE(review): a conjunct (source line 1649) is elided here; confirm
// against upstream.
1650 !E->HasSideEffects(CGF.getContext());
1651 // '\0'
1652 if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1653 return CL->getValue() == 0;
1654
1655 // Otherwise, hard case: conservatively return false.
1656 return false;
1657}
1658
/// Initialize the given l-value from the expression, skipping the store
/// entirely when the destination is already zeroed and the value is an
/// obvious zero; reference-typed members are bound rather than stored.
1659void AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1660 QualType type = LV.getType();
1661 // FIXME: Ignore result?
1662 // FIXME: Are initializers affected by volatile?
1663 if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1664 // Storing "i32 0" to a zero'd memory location is a noop.
1665 return;
// NOTE(review): an 'else if' head (source line 1666) is elided in this
// rendering — presumably a test for value-initialization expressions,
// since the next line null-initializes; confirm against upstream.
1667 return EmitNullInitializationToLValue(LV);
1668 } else if (isa<NoInitExpr>(E)) {
1669 // Do nothing.
1670 return;
1671 } else if (type->isReferenceType()) {
1672 RValue RV = CGF.EmitReferenceBindingToExpr(E);
1673 return CGF.EmitStoreThroughLValue(RV, LV);
1674 }
1675
1676 CGF.EmitInitializationToLValue(E, LV, Dest.isZeroed());
1677}
1678
/// Store a null/zero value of the l-value's type into it, doing nothing at
/// all when the destination slot is already zeroed and the type is
/// zero-initializable.
1679void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1680 QualType type = lv.getType();
1681
1682 // If the destination slot is already zeroed out before the aggregate is
1683 // copied into it, we don't have to emit any zeros here.
1684 if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1685 return;
1686
1687 if (CGF.hasScalarEvaluationKind(type)) {
1688 // For non-aggregates, we can store the appropriate null constant.
1689 llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1690 // Note that the following is not equivalent to
1691 // EmitStoreThroughBitfieldLValue for ARC types.
1692 if (lv.isBitField()) {
// NOTE(review): the bitfield store (source line 1693) is elided in this
// rendering — per the comment above, presumably a store through the
// bitfield l-value; confirm against upstream.
1694 } else {
1695 assert(lv.isSimple());
1696 CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1697 }
1698 } else {
1699 // There's a potential optimization opportunity in combining
1700 // memsets; that would be easy for arrays, but relatively
1701 // difficult for structures with the current code.
1702 CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
1703 }
1704}
1705
/// Parenthesized aggregate initialization shares the init-list path.
1706void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
1707 VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
// NOTE(review): one argument (source line 1708) is elided in this rendering
// — by analogy with VisitInitListExpr, presumably the initialized union
// field; confirm against upstream.
1709 E->getArrayFiller());
1710}
1711
1712void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1713 if (E->hadArrayRangeDesignator())
1714 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1715
1716 if (E->isTransparent())
1717 return Visit(E->getInit(0));
1718
1719 VisitCXXParenListOrInitListExpr(
1720 E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
1721}
1722
1723void AggExprEmitter::VisitCXXParenListOrInitListExpr(
1724 Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
1725 FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
1726#if 0
1727 // FIXME: Assess perf here? Figure out what cases are worth optimizing here
1728 // (Length of globals? Chunks of zeroed-out space?).
1729 //
1730 // If we can, prefer a copy from a global; this is a lot less code for long
1731 // globals, and it's easier for the current optimizers to analyze.
1732 if (llvm::Constant *C =
1733 CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
1734 llvm::GlobalVariable* GV =
1735 new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1736 llvm::GlobalValue::InternalLinkage, C, "");
1737 EmitFinalDestCopy(ExprToVisit->getType(),
1738 CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
1739 return;
1740 }
1741#endif
1742
1743 // HLSL initialization lists in the AST are an expansion which can contain
1744 // side-effecting expressions wrapped in opaque value expressions. To properly
1745 // emit these we need to emit the opaque values before we emit the argument
1746 // expressions themselves. This is a little hacky, but it prevents us needing
1747 // to do a bigger AST-level change for a language feature that we need
1748 // deprecate in the near future. See related HLSL language proposals:
1749 // * 0005-strict-initializer-lists.md
1750 // * https://github.com/microsoft/hlsl-specs/pull/325
1751 if (CGF.getLangOpts().HLSL && isa<InitListExpr>(ExprToVisit))
1753 CGF, cast<InitListExpr>(ExprToVisit));
1754
1755 AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());
1756
1757 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
1758
1759 // Handle initialization of an array.
1760 if (ExprToVisit->getType()->isConstantArrayType()) {
1761 auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1762 EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
1763 InitExprs, ArrayFiller);
1764 return;
1765 } else if (ExprToVisit->getType()->isVariableArrayType()) {
1766 // A variable array type that has an initializer can only do empty
1767 // initialization. And because this feature is not exposed as an extension
1768 // in C++, we can safely memset the array memory to zero.
1769 assert(InitExprs.size() == 0 &&
1770 "you can only use an empty initializer with VLAs");
1771 CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
1772 return;
1773 }
1774
1775 assert(ExprToVisit->getType()->isRecordType() &&
1776 "Only support structs/unions here!");
1777
1778 // Do struct initialization; this code just sets each individual member
1779 // to the approprate value. This makes bitfield support automatic;
1780 // the disadvantage is that the generated code is more difficult for
1781 // the optimizer, especially with bitfields.
1782 unsigned NumInitElements = InitExprs.size();
1783 RecordDecl *record = ExprToVisit->getType()->castAsRecordDecl();
1784
1785 // We'll need to enter cleanup scopes in case any of the element
1786 // initializers throws an exception.
1787 CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);
1788
1789 unsigned curInitIndex = 0;
1790
1791 // Emit initialization of base classes.
1792 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1793 assert(NumInitElements >= CXXRD->getNumBases() &&
1794 "missing initializer for base class");
1795 for (auto &Base : CXXRD->bases()) {
1796 assert(!Base.isVirtual() && "should not see vbases here");
1797 auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1799 Dest.getAddress(), CXXRD, BaseRD,
1800 /*isBaseVirtual*/ false);
1801 AggValueSlot AggSlot = AggValueSlot::forAddr(
1802 V, Qualifiers(), AggValueSlot::IsDestructed,
1804 CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1805 CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
1806
1807 if (QualType::DestructionKind dtorKind =
1808 Base.getType().isDestructedType())
1809 CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
1810 }
1811 }
1812
1813 // Prepare a 'this' for CXXDefaultInitExprs.
1814 CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1815
1816 const bool ZeroInitPadding =
1817 CGF.CGM.shouldZeroInitPadding() && !Dest.isZeroed();
1818
1819 if (record->isUnion()) {
1820 // Only initialize one field of a union. The field itself is
1821 // specified by the initializer list.
1822 if (!InitializedFieldInUnion) {
1823 // Empty union; we have nothing to do.
1824
1825#ifndef NDEBUG
1826 // Make sure that it's really an empty and not a failure of
1827 // semantic analysis.
1828 for (const auto *Field : record->fields())
1829 assert(
1830 (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
1831 "Only unnamed bitfields or anonymous class allowed");
1832#endif
1833 return;
1834 }
1835
1836 // FIXME: volatility
1837 FieldDecl *Field = InitializedFieldInUnion;
1838
1839 LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1840 if (NumInitElements) {
1841 // Store the initializer into the field
1842 EmitInitializationToLValue(InitExprs[0], FieldLoc);
1843 if (ZeroInitPadding) {
1844 uint64_t TotalSize = CGF.getContext().toBits(
1845 Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
1846 uint64_t FieldSize = CGF.getContext().getTypeSize(FieldLoc.getType());
1847 DoZeroInitPadding(FieldSize, TotalSize, nullptr);
1848 }
1849 } else {
1850 // Default-initialize to null.
1851 if (ZeroInitPadding)
1852 EmitNullInitializationToLValue(DestLV);
1853 else
1854 EmitNullInitializationToLValue(FieldLoc);
1855 }
1856 return;
1857 }
1858
1859 // Here we iterate over the fields; this makes it simpler to both
1860 // default-initialize fields and skip over unnamed fields.
1861 const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(record);
1862 uint64_t PaddingStart = 0;
1863
1864 for (const auto *field : record->fields()) {
1865 // We're done once we hit the flexible array member.
1866 if (field->getType()->isIncompleteArrayType())
1867 break;
1868
1869 // Always skip anonymous bitfields.
1870 if (field->isUnnamedBitField())
1871 continue;
1872
1873 // We're done if we reach the end of the explicit initializers, we
1874 // have a zeroed object, and the rest of the fields are
1875 // zero-initializable.
1876 if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1877 CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
1878 break;
1879
1880 if (ZeroInitPadding)
1881 DoZeroInitPadding(PaddingStart,
1882 Layout.getFieldOffset(field->getFieldIndex()), field);
1883
1884 LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1885 // We never generate write-barries for initialized fields.
1886 LV.setNonGC(true);
1887
1888 if (curInitIndex < NumInitElements) {
1889 // Store the initializer into the field.
1890 EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
1891 } else {
1892 // We're out of initializers; default-initialize to null
1893 EmitNullInitializationToLValue(LV);
1894 }
1895
1896 // Push a destructor if necessary.
1897 // FIXME: if we have an array of structures, all explicitly
1898 // initialized, we can end up pushing a linear number of cleanups.
1899 if (QualType::DestructionKind dtorKind =
1900 field->getType().isDestructedType()) {
1901 assert(LV.isSimple());
1902 if (dtorKind) {
1904 field->getType(),
1905 CGF.getDestroyer(dtorKind), false);
1906 }
1907 }
1908 }
1909 if (ZeroInitPadding) {
1910 uint64_t TotalSize = CGF.getContext().toBits(
1911 Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
1912 DoZeroInitPadding(PaddingStart, TotalSize, nullptr);
1913 }
1914}
1915
// Zero-initializes the padding bits in [PaddingStart, PaddingEnd) of the
// destination slot, then advances PaddingStart past NextField (the field
// about to be emitted; null when flushing trailing padding). All offsets
// are expressed in bits.
1916void AggExprEmitter::DoZeroInitPadding(uint64_t &PaddingStart,
1917 uint64_t PaddingEnd,
1918 const FieldDecl *NextField) {
1919
 // Memset to zero the bytes covering the bit range [StartBit, EndBit).
 // NOTE(review): the declaration of `Addr` (original line 1923) was dropped
 // from this listing; presumably it is the destination slot's address with
 // an i8 element type — confirm against the full source.
1920 auto InitBytes = [&](uint64_t StartBit, uint64_t EndBit) {
1921 CharUnits Start = CGF.getContext().toCharUnitsFromBits(StartBit);
1922 CharUnits End = CGF.getContext().toCharUnitsFromBits(EndBit);
1924 if (!Start.isZero())
1925 Addr = Builder.CreateConstGEP(Addr, Start.getQuantity());
1926 llvm::Constant *SizeVal = Builder.getInt64((End - Start).getQuantity());
1927 CGF.Builder.CreateMemSet(Addr, Builder.getInt8(0), SizeVal, false);
1928 };
1929
1930 if (NextField != nullptr && NextField->isBitField()) {
1931 // For bitfield, zero init StorageSize before storing the bits. So we don't
1932 // need to handle big/little endian.
1933 const CGRecordLayout &RL =
1934 CGF.getTypes().getCGRecordLayout(NextField->getParent());
1935 const CGBitFieldInfo &Info = RL.getBitFieldInfo(NextField);
1936 uint64_t StorageStart = CGF.getContext().toBits(Info.StorageOffset);
1937 if (StorageStart + Info.StorageSize > PaddingStart) {
 // Zero any padding that precedes the bit-field's storage unit, then
 // zero the entire storage unit itself with one iN store.
1938 if (StorageStart > PaddingStart)
1939 InitBytes(PaddingStart, StorageStart);
1940 Address Addr = Dest.getAddress();
1941 if (!Info.StorageOffset.isZero())
1942 Addr = Builder.CreateConstGEP(Addr.withElementType(CGF.CharTy),
1943 Info.StorageOffset.getQuantity());
1944 Addr = Addr.withElementType(
1945 llvm::Type::getIntNTy(CGF.getLLVMContext(), Info.StorageSize));
1946 Builder.CreateStore(Builder.getIntN(Info.StorageSize, 0), Addr);
1947 PaddingStart = StorageStart + Info.StorageSize;
1948 }
1949 return;
1950 }
1951
 // Ordinary (non-bit-field) case: memset the requested gap, then skip over
 // the next field's size so the following call starts after it.
1952 if (PaddingStart < PaddingEnd)
1953 InitBytes(PaddingStart, PaddingEnd);
1954 if (NextField != nullptr)
1955 PaddingStart =
1956 PaddingEnd + CGF.getContext().getTypeSize(NextField->getType());
1957}
1958
// Emits an ArrayInitLoopExpr: evaluates the common subexpression once, then
// loops over the array elements, initializing each from the loop's
// subexpression. `outerBegin` is non-null when this call is nested inside an
// outer ArrayInitLoopExpr, in which case a single partial-array destructor
// cleanup is shared across the whole multidimensional initialization.
1959void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1960 llvm::Value *outerBegin) {
1961 // Emit the common subexpression.
1962 CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1963
1964 Address destPtr = EnsureSlot(E->getType()).getAddress();
1965 uint64_t numElements = E->getArraySize().getZExtValue();
1966
 // Nothing to initialize for a zero-length array.
1967 if (!numElements)
1968 return;
1969
1970 // destPtr is an array*. Construct an elementType* by drilling down a level.
1971 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1972 llvm::Value *indices[] = {zero, zero};
1973 llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
1974 destPtr.emitRawPointer(CGF),
1975 indices, "arrayinit.begin");
1976
1977 // Prepare to special-case multidimensional array initialization: we avoid
1978 // emitting multiple destructor loops in that case.
1979 if (!outerBegin)
1980 outerBegin = begin;
1981 ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
1982
 // NOTE(review): the initializer of `elementType` (original line 1984) was
 // dropped in this listing — confirm against the full source.
1983 QualType elementType =
1985 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1986 CharUnits elementAlign =
1987 destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1988 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
1989
1990 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1991 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1992
1993 // Jump into the body.
1994 CGF.EmitBlock(bodyBB);
 // `index` is the loop induction variable: 0 on entry, incremented at the
 // bottom of the body (second incoming value added below).
1995 llvm::PHINode *index =
1996 Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1997 index->addIncoming(zero, entryBB);
1998 llvm::Value *element =
1999 Builder.CreateInBoundsGEP(llvmElementType, begin, index);
2000
2001 // Prepare for a cleanup.
 // Only push a partial-array EH cleanup at the outermost loop; inner loops
 // of a multidimensional init reuse the outer cleanup (see InnerLoop below).
2002 QualType::DestructionKind dtorKind = elementType.isDestructedType();
2003 EHScopeStack::stable_iterator cleanup;
2004 if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
2005 if (outerBegin->getType() != element->getType())
2006 outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
2007 CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
2008 elementAlign,
2009 CGF.getDestroyer(dtorKind));
2011 } else {
2012 dtorKind = QualType::DK_none;
2013 }
2014
2015 // Emit the actual filler expression.
2016 {
2017 // Temporaries created in an array initialization loop are destroyed
2018 // at the end of each iteration.
2019 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
2020 CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
2021 LValue elementLV = CGF.MakeAddrLValue(
2022 Address(element, llvmElementType, elementAlign), elementType);
2023
2024 if (InnerLoop) {
2025 // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
2026 auto elementSlot = AggValueSlot::forLValue(
2027 elementLV, AggValueSlot::IsDestructed,
2030 AggExprEmitter(CGF, elementSlot, false)
2031 .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
2032 } else
2033 EmitInitializationToLValue(E->getSubExpr(), elementLV);
2034 }
2035
2036 // Move on to the next element.
2037 llvm::Value *nextIndex = Builder.CreateNUWAdd(
2038 index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
2039 index->addIncoming(nextIndex, Builder.GetInsertBlock());
2040
2041 // Leave the loop if we're done.
2042 llvm::Value *done = Builder.CreateICmpEQ(
2043 nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
2044 "arrayinit.done");
2045 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
2046 Builder.CreateCondBr(done, endBB, bodyBB);
2047
2048 CGF.EmitBlock(endBB);
2049
2050 // Leave the partial-array cleanup if we entered one.
2051 if (dtorKind)
2052 CGF.DeactivateCleanupBlock(cleanup, index);
2053}
2054
2055void AggExprEmitter::VisitDesignatedInitUpdateExpr(
2056 DesignatedInitUpdateExpr *E) {
2057 AggValueSlot Dest = EnsureSlot(E->getType());
2058
2059 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
2060 EmitInitializationToLValue(E->getBase(), DestLV);
2061 VisitInitListExpr(E->getUpdater());
2062}
2063
2064//===----------------------------------------------------------------------===//
2065// Entry Points into this File
2066//===----------------------------------------------------------------------===//
2067
2068/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
2069/// non-zero bytes that will be stored when outputting the initializer for the
2070/// specified initializer expression.
 // NOTE(review): the signature line (original 2071) was dropped from this
 // listing; per the recursive calls below it is
 // `static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF)`
 // — confirm against the full source.
2072 if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
2073 E = MTE->getSubExpr();
2074 E = E->IgnoreParenNoopCasts(CGF.getContext());
2075
2076 // 0 and 0.0 won't require any non-zero stores!
2077 if (isSimpleZero(E, CGF))
2078 return CharUnits::Zero();
2079
2080 // If this is an initlist expr, sum up the sizes of the (present)
2081 // elements. If this is something weird, assume the whole thing is non-zero.
2082 const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
2083 while (ILE && ILE->isTransparent())
2084 ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
 // Conservative fallback: treat the entire type as non-zero bytes.
2085 if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
2086 return CGF.getContext().getTypeSizeInChars(E->getType());
2087
2088 // InitListExprs for structs have to be handled carefully. If there are
2089 // reference members, we need to consider the size of the reference, not the
2090 // referencee. InitListExprs for unions and arrays can't have references.
2091 if (const RecordType *RT = E->getType()->getAsCanonical<RecordType>()) {
2092 if (!RT->isUnionType()) {
2093 RecordDecl *SD = RT->getDecl()->getDefinitionOrSelf();
2094 CharUnits NumNonZeroBytes = CharUnits::Zero();
2095
 // Base-class initializers come first in the init list of a C++ class.
2096 unsigned ILEElement = 0;
2097 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
2098 while (ILEElement != CXXRD->getNumBases())
2099 NumNonZeroBytes +=
2100 GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
2101 for (const auto *Field : SD->fields()) {
2102 // We're done once we hit the flexible array member or run out of
2103 // InitListExpr elements.
2104 if (Field->getType()->isIncompleteArrayType() ||
2105 ILEElement == ILE->getNumInits())
2106 break;
2107 if (Field->isUnnamedBitField())
2108 continue;
2109
2110 const Expr *E = ILE->getInit(ILEElement++);
2111
2112 // Reference values are always non-null and have the width of a pointer.
 // NOTE(review): the pointer-width argument line (original 2115) was
 // dropped from this listing — confirm against the full source.
2113 if (Field->getType()->isReferenceType())
2114 NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
2116 else
2117 NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
2118 }
2119
2120 return NumNonZeroBytes;
2121 }
2122 }
2123
 // Unions and arrays: just sum the present initializers.
2124 // FIXME: This overestimates the number of non-zero bytes for bit-fields.
2125 CharUnits NumNonZeroBytes = CharUnits::Zero();
2126 for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
2127 NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
2128 return NumNonZeroBytes;
2129}
2130
2131/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
2132/// zeros in it, emit a memset and avoid storing the individual zeros.
2133///
2134static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
2135 CodeGenFunction &CGF) {
2136 // If the slot is already known to be zeroed, nothing to do. Don't mess with
2137 // volatile stores.
2138 if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
2139 return;
2140
2141 // C++ objects with a user-declared constructor don't need zero'ing.
 // NOTE(review): two lines (originals 2144 and 2147) were dropped from this
 // listing — presumably the base-element-type query feeding
 // getAsCanonical and the user-declared-constructor check guarding the
 // early return; confirm against the full source.
2142 if (CGF.getLangOpts().CPlusPlus)
2143 if (const RecordType *RT = CGF.getContext()
2145 ->getAsCanonical<RecordType>()) {
2146 const auto *RD = cast<CXXRecordDecl>(RT->getDecl());
2148 return;
2149 }
2150
2151 // If the type is 16-bytes or smaller, prefer individual stores over memset.
2152 CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
2153 if (Size <= CharUnits::fromQuantity(16))
2154 return;
2155
2156 // Check to see if over 3/4 of the initializer are known to be zero. If so,
2157 // we prefer to emit memset + individual stores for the rest.
2158 CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
2159 if (NumNonZeroBytes * 4 > Size)
2160 return;
2161
2162 // Okay, it seems like a good idea to use an initial memset, emit the call.
2163 llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
2164
2165 Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
2166 CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
2167
2168 // Tell the AggExprEmitter that the slot is known zero.
2169 Slot.setZeroed();
2170}
2171
2172/// EmitAggExpr - Emit the computation of the specified expression of aggregate
2173/// type. The result is computed into DestPtr. Note that if DestPtr is null,
2174/// the value of the aggregate expression is not needed. If VolatileDest is
2175/// true, DestPtr cannot be 0.
 // NOTE(review): the signature line (original 2176) was dropped from this
 // listing; per the uses of `E`, `Slot`, and `*this` below this is
 // `void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot)` —
 // confirm against the full source.
2177 assert(E && hasAggregateEvaluationKind(E->getType()) &&
2178 "Invalid aggregate expression to emit");
2179 assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
2180 "slot has bits but no address");
2181
2182 // Optimize the slot if possible.
 // May pre-memset the slot and mark it zeroed (see CheckAggExprForMemSetUse).
2183 CheckAggExprForMemSetUse(Slot, E, *this);
2184
2185 AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
2186}
2187
2198
 // NOTE(review): the signature line (original 2199) was dropped from this
 // listing; per the forwarding call below this is
 // CodeGenFunction::EmitAggFinalDestCopy, which delegates the copy of `Src`
 // into `Dest` to AggExprEmitter::EmitFinalDestCopy — confirm against the
 // full source.
2200 const LValue &Src,
2201 ExprValueKind SrcKind) {
2202 return AggExprEmitter(*this, Dest, Dest.isIgnored())
2203 .EmitFinalDestCopy(Type, Src, SrcKind);
2204}
2205
 // Decides whether initializing field FD may overlap already-initialized
 // storage (only possible for [[no_unique_address]] record-typed fields).
 // NOTE(review): the signature line and the `return AggValueSlot::...` lines
 // (originals 2206-2207, 2209, 2213, 2223, 2226) were dropped from this
 // listing — confirm the returned Overlap_t values against the full source.
2208 if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2210
2211 // Empty fields can overlap earlier fields.
2212 if (FD->getType()->getAsCXXRecordDecl()->isEmpty())
2214
2215 // If the field lies entirely within the enclosing class's nvsize, its tail
2216 // padding cannot overlap any already-initialized object. (The only subobjects
2217 // with greater addresses that might already be initialized are vbases.)
2218 const RecordDecl *ClassRD = FD->getParent();
2219 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
2220 if (Layout.getFieldOffset(FD->getFieldIndex()) +
2221 getContext().getTypeSize(FD->getType()) <=
2222 (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
2224
2225 // The tail padding may contain values we need to preserve.
2227}
2228
 // Decides whether initializing base class BaseRD of RD may overlap
 // already-initialized storage (virtual or empty bases, or bases whose tail
 // padding extends past the derived class's nvsize).
 // NOTE(review): the leading signature fragment and the
 // `return AggValueSlot::...` lines (originals 2229, 2235, 2239, 2248, 2251)
 // were dropped from this listing — confirm the returned Overlap_t values
 // against the full source.
2230 const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
2231 // If the most-derived object is a field declared with [[no_unique_address]],
2232 // the tail padding of any virtual base could be reused for other subobjects
2233 // of that field's class.
2234 if (IsVirtual)
2236
2237 // Empty bases can overlap earlier bases.
2238 if (BaseRD->isEmpty())
2240
2241 // If the base class is laid out entirely within the nvsize of the derived
2242 // class, its tail padding cannot yet be initialized, so we can issue
2243 // stores at the full width of the base class.
2244 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2245 if (Layout.getBaseClassOffset(BaseRD) +
2246 getContext().getASTRecordLayout(BaseRD).getSize() <=
2247 Layout.getNonVirtualSize())
2249
2250 // The tail padding may contain values we need to preserve.
2252}
2253
 // Copies an aggregate of type Ty from Src into Dest, normally via
 // llvm.memcpy. MayOverlap indicates the destination might be a
 // potentially-overlapping subobject, in which case tail padding is not
 // copied. isVolatile marks the copy volatile.
 // NOTE(review): the leading signature lines and three interior lines
 // (originals 2254, 2278, 2289, 2307 — presumably the CUDA surface-type
 // check, the HLSL guard, and the TypeInfoChars declaration) were dropped
 // from this listing — confirm against the full source.
2255 AggValueSlot::Overlap_t MayOverlap,
2256 bool isVolatile) {
2257 assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
2258
2259 Address DestPtr = Dest.getAddress();
2260 Address SrcPtr = Src.getAddress();
2261
2262 if (getLangOpts().CPlusPlus) {
2263 if (const auto *Record = Ty->getAsCXXRecordDecl()) {
2264 assert((Record->hasTrivialCopyConstructor() ||
2265 Record->hasTrivialCopyAssignment() ||
2266 Record->hasTrivialMoveConstructor() ||
2267 Record->hasTrivialMoveAssignment() ||
2268 Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
2269 "Trying to aggregate-copy a type without a trivial copy/move "
2270 "constructor or assignment operator");
2271 // Ignore empty classes in C++.
2272 if (Record->isEmpty())
2273 return;
2274 }
2275 }
2276
 // CUDA device builtin surface/texture types are copied via target hooks.
2277 if (getLangOpts().CUDAIsDevice) {
2279 if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
2280 Src))
2281 return;
2282 } else if (Ty->isCUDADeviceBuiltinTextureType()) {
2283 if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
2284 Src))
2285 return;
2286 }
2287 }
2288
2290 if (CGM.getHLSLRuntime().emitBufferCopy(*this, DestPtr, SrcPtr, Ty))
2291 return;
2292
2293 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
2294 // C99 6.5.16.1p3, which states "If the value being stored in an object is
2295 // read from another object that overlaps in any way the storage of the first
2296 // object, then the overlap shall be exact and the two objects shall have
2297 // qualified or unqualified versions of a compatible type."
2298 //
2299 // memcpy is not defined if the source and destination pointers are exactly
2300 // equal, but other compilers do this optimization, and almost every memcpy
2301 // implementation handles this case safely. If there is a libc that does not
2302 // safely handle this, we can add a target hook.
2303
2304 // Get data size info for this aggregate. Don't copy the tail padding if this
2305 // might be a potentially-overlapping subobject, since the tail padding might
2306 // be occupied by a different object. Otherwise, copying it is fine.
2308 if (MayOverlap)
2309 TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
2310 else
2311 TypeInfo = getContext().getTypeInfoInChars(Ty);
2312
2313 llvm::Value *SizeVal = nullptr;
2314 if (TypeInfo.Width.isZero()) {
2315 // But note that getTypeInfo returns 0 for a VLA.
 // For a VLA, compute the byte count at runtime: element count * element
 // size, using no-unsigned-wrap multiplication.
2316 if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
2317 getContext().getAsArrayType(Ty))) {
2318 QualType BaseEltTy;
2319 SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
2320 TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
2321 assert(!TypeInfo.Width.isZero());
2322 SizeVal = Builder.CreateNUWMul(
2323 SizeVal,
2324 llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
2325 }
2326 }
2327 if (!SizeVal) {
2328 SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
2329 }
2330
2331 // FIXME: If we have a volatile struct, the optimizer can remove what might
2332 // appear to be `extra' memory ops:
2333 //
2334 // volatile struct { int i; } a, b;
2335 //
2336 // int main() {
2337 // a = b;
2338 // a = b;
2339 // }
2340 //
2341 // we need to use a different call here. We use isVolatile to indicate when
2342 // either the source or the destination is volatile.
2343
2344 DestPtr = DestPtr.withElementType(Int8Ty);
2345 SrcPtr = SrcPtr.withElementType(Int8Ty);
2346
2347 // Don't do any of the memmove_collectable tests if GC isn't set.
2348 if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
2349 // fall through
2350 } else if (const auto *Record = Ty->getAsRecordDecl()) {
 // Under ObjC GC, records (or arrays of records) containing object
 // members must be copied with the collector-aware memmove.
2351 if (Record->hasObjectMember()) {
2352 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2353 SizeVal);
2354 return;
2355 }
2356 } else if (Ty->isArrayType()) {
2357 QualType BaseType = getContext().getBaseElementType(Ty);
2358 if (const auto *Record = BaseType->getAsRecordDecl()) {
2359 if (Record->hasObjectMember()) {
2360 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2361 SizeVal);
2362 return;
2363 }
2364 }
2365 }
2366
2367 auto *Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2368 addInstToCurrentSourceAtom(Inst, nullptr);
2369 emitPFPPostCopyUpdates(DestPtr, SrcPtr, Ty);
2370
2371 // Determine the metadata to describe the position of any padding in this
2372 // memcpy, as well as the TBAA tags for the members of the struct, in case
2373 // the optimizer wishes to expand it in to scalar memory operations.
2374 if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2375 Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2376
2377 if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2378 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
2379 Dest.getTBAAInfo(), Src.getTBAAInfo());
2380 CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2381 }
2382}
Defines the clang::ASTContext interface.
#define V(N, I)
CompareKind
@ CK_Greater
@ CK_Less
@ CK_Equal
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF)
GetNumNonZeroBytesInInit - Get an approximate count of the number of non-zero bytes that will be stor...
static Expr * findPeephole(Expr *op, CastKind kind, const ASTContext &ctx)
Attempt to look through various unimportant expressions to find a cast of the given kind.
static bool isBlockVarRef(const Expr *E)
Is the value of the given expression possibly a reference to or into a __block variable?
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF)
isSimpleZero - If emitting this value will obviously just cause a store of zero to memory,...
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal, LValue SrcVal, SourceLocation Loc)
static bool castPreservesZero(const CastExpr *CE)
Determine whether the given cast kind is known to always convert values with all zero bits in their v...
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, CodeGenFunction &CGF)
CheckAggExprForMemSetUse - If the initializer is large and has a lot of zeros in it,...
static void EmitHLSLScalarElementwiseAndSplatCasts(CodeGenFunction &CGF, LValue DestVal, llvm::Value *SrcVal, QualType SrcTy, SourceLocation Loc)
static bool isTrivialFiller(Expr *e)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the C++ template declaration subclasses.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
llvm::MachO::Record Record
Definition MachO.h:31
A collection of selectors, each with an associated kind, forming an ordered collection of selectors; a selector has a kind
llvm::json::Array Array
static bool isVector(QualType QT, QualType ElementType)
This helper function returns true if QT is a vector type that has element type ElementType.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:227
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
ComparisonCategories CompCategories
Types and expressions required to build C++2a three-way comparisons using operator<=>,...
QualType removeAddrSpaceQualType(QualType T) const
Remove any existing address space on the type and returns the type with qualifiers intact (or that's ...
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
static bool hasSameType(QualType T1, QualType T2)
Determine whether the given types T1 and T2 are equivalent.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
llvm::APInt getArraySize() const
Definition Expr.h:5991
OpaqueValueExpr * getCommonExpr() const
Get the common subexpression shared by all initializations (the source array).
Definition Expr.h:5984
Expr * getSubExpr() const
Get the initializer to use for each array element.
Definition Expr.h:5989
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
QualType getElementType() const
Definition TypeBase.h:3789
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
Expr * getRHS() const
Definition Expr.h:4093
Opcode getOpcode() const
Definition Expr.h:4086
CXXTemporary * getTemporary()
Definition ExprCXX.h:1515
const Expr * getSubExpr() const
Definition ExprCXX.h:1519
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1107
bool constructsVBase() const
Determine whether this constructor is actually constructing a base class (rather than a complete obje...
Definition ExprCXX.h:1796
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will call.
Definition ExprCXX.h:1792
bool inheritedFromVBase() const
Determine whether the inherited constructor is inherited from a virtual base of the object we constru...
Definition ExprCXX.h:1806
MutableArrayRef< Expr * > getInitExprs()
Definition ExprCXX.h:5181
FieldDecl * getInitializedFieldInUnion()
Definition ExprCXX.h:5215
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialMoveAssignment() const
Determine whether this class has a trivial move assignment operator (C++11 [class....
Definition DeclCXX.h:1347
bool isTriviallyCopyable() const
Determine whether this class is considered trivially copyable per (C++11 [class]p6).
Definition DeclCXX.cpp:610
bool hasTrivialMoveConstructor() const
Determine whether this class has a trivial move constructor (C++11 [class.copy]p12)
Definition DeclCXX.h:1307
bool hasTrivialCopyConstructor() const
Determine whether this class has a trivial copy constructor (C++ [class.copy]p6, C++11 [class....
Definition DeclCXX.h:1284
bool hasTrivialCopyAssignment() const
Determine whether this class has a trivial copy assignment operator (C++ [class.copy]p11,...
Definition DeclCXX.h:1334
bool hasUserDeclaredConstructor() const
Determine whether this class has any user-declared constructors.
Definition DeclCXX.h:780
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:308
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4887
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * getBasePointer() const
Definition Address.h:198
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
bool isValid() const
Definition Address.h:177
An aggregate value slot.
Definition CGValue.h:551
void setVolatile(bool flag)
Definition CGValue.h:670
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition CGValue.h:619
Address getAddress() const
Definition CGValue.h:691
CharUnits getPreferredSize(ASTContext &Ctx, QualType Type) const
Get the preferred size to use when storing a value to this slot.
Definition CGValue.h:729
NeedsGCBarriers_t requiresGCollection() const
Definition CGValue.h:681
void setExternallyDestructed(bool destructed=true)
Definition CGValue.h:660
void setZeroed(bool V=true)
Definition CGValue.h:721
IsZeroed_t isZeroed() const
Definition CGValue.h:722
Qualifiers getQualifiers() const
Definition CGValue.h:664
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition CGValue.h:649
IsAliased_t isPotentiallyAliased() const
Definition CGValue.h:701
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:634
IsDestructed_t isExternallyDestructed() const
Definition CGValue.h:657
Overlap_t mayOverlap() const
Definition CGValue.h:705
RValue asRValue() const
Definition CGValue.h:713
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Definition CGValue.h:687
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:430
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition CGBuilder.h:229
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:84
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr, llvm::Value *Size)=0
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:591
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
void callCStructMoveConstructor(LValue Dst, LValue Src)
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null. If the type contains...
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
pushRegularPartialArrayCleanup - Push an EH cleanup to destroy already-constructed elements of the gi...
Definition CGDecl.cpp:2615
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:3034
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6560
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7333
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual)
Determine whether a base class initialization may overlap some other object.
const LangOptions & getLangOpts() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:697
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
Definition CGExpr.cpp:7161
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
Definition CGDecl.cpp:2299
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to destroy already-constructed elements ...
Definition CGDecl.cpp:2599
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
Definition CGDecl.cpp:2272
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
Definition CGExpr.cpp:7338
void CreateCoercedStore(llvm::Value *Src, QualType SrcFETy, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
Definition CGCall.cpp:1567
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetInfo & getTarget() const
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:256
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6445
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2520
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
Definition CGDecl.cpp:2324
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
void callCStructCopyAssignmentOperator(LValue Dst, LValue Src)
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
void callCStructCopyConstructor(LValue Dst, LValue Src)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1357
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5936
Address GetAddressOfDirectBaseInCompleteClass(Address Value, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual)
GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a complete class to the given direct b...
Definition CGClass.cpp:214
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition CGExpr.cpp:158
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6384
const TargetCodeGenInfo & getTargetHooks() const
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1369
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
Definition CGExpr.cpp:229
void callCStructMoveAssignmentOperator(LValue Dst, LValue Src)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2772
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2352
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:560
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:278
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:912
void emitPFPPostCopyUpdates(Address DestPtr, Address SrcPtr, QualType Ty)
Copy all PFP fields from SrcPtr to DestPtr while updating signatures, assuming that DestPtr was alrea...
CodeGenTypes & getTypes() const
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7342
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
Emits all the code to cause the given temporary to be cleaned up.
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1677
void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E)
Emit a call to a constructor inherited from a base class, passing the current constructor's arguments...
Definition CGClass.cpp:2403
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:194
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Definition CGExpr.cpp:337
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1712
llvm::LLVMContext & getLLVMContext()
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:643
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1392
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::Module & getModule() const
bool isPaddedAtomicType(QualType type)
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
bool isPointerZeroInitializable(QualType T)
Check if the pointer type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM z...
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into a unstable pointer to the EH stack.
Definition CGCleanup.h:654
LValue - This represents an lvalue reference.
Definition CGValue.h:183
Address getAddress() const
Definition CGValue.h:373
TBAAAccessInfo getTBAAInfo() const
Definition CGValue.h:347
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
llvm::Value * getAggregatePointer(QualType PointeeType, CodeGenFunction &CGF) const
Definition CGValue.h:89
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddress() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
bool isComplex() const
Definition CGValue.h:65
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition CGValue.h:79
virtual LangAS getSRetAddrSpace(const CXXRecordDecl *RD) const
Get the address space for an indirect (sret) return of the given type.
Definition TargetInfo.h:327
const ComparisonCategoryInfo & getInfoForType(QualType Ty) const
Return the comparison category information as specified by getCategoryForType(Ty).
bool isPartial() const
True iff the comparison is not totally ordered.
const ValueInfo * getLess() const
const ValueInfo * getUnordered() const
const CXXRecordDecl * Record
The declaration for the comparison category type from the standard library.
const ValueInfo * getGreater() const
const ValueInfo * getEqualOrEquiv() const
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3330
const Expr * getInitializer() const
Definition Expr.h:3636
llvm::APInt getSize() const
Return the constant array size as an APInt.
Definition TypeBase.h:3871
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool hasAttr() const
Definition DeclBase.h:585
InitListExpr * getUpdater() const
Definition Expr.h:5937
This represents one expression.
Definition Expr.h:112
bool isGLValue() const
Definition Expr.h:287
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3117
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3688
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3178
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3281
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3263
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3414
const Expr * getSubExpr() const
Definition Expr.h:1065
Describes an C or C++ initializer list.
Definition Expr.h:5302
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2462
FieldDecl * getInitializedFieldInUnion()
If this initializes a union, specifies which field in the union to initialize.
Definition Expr.h:5426
unsigned getNumInits() const
Definition Expr.h:5332
bool hadArrayRangeDesignator() const
Definition Expr.h:5484
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5402
const Expr * getInit(unsigned Init) const
Definition Expr.h:5354
ArrayRef< Expr * > inits() const
Definition Expr.h:5352
capture_init_iterator capture_init_end()
Retrieve the iterator pointing one past the last initialization argument for this lambda expression.
Definition ExprCXX.h:2110
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition ExprCXX.h:2084
capture_init_iterator capture_init_begin()
Retrieve the first initialization argument for this lambda expression (which initializes the first ca...
Definition ExprCXX.h:2098
CXXRecordDecl * getLambdaClass() const
Retrieve the class that corresponds to the lambda.
Definition ExprCXX.cpp:1402
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3708
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
bool isUnique() const
Definition Expr.h:1239
Expr * getSelectedExpr() const
Definition ExprCXX.h:4639
const Expr * getSubExpr() const
Definition Expr.h:2202
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8520
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2961
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8562
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1556
@ PCK_Struct
The type is a struct containing a field whose type is neither PCK_Trivial nor PCK_VolatileTrivial.
Definition TypeBase.h:1528
Represents a struct/union/class.
Definition Decl.h:4343
bool hasObjectMember() const
Definition Decl.h:4403
field_range fields() const
Definition Decl.h:4546
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4543
RecordDecl * getDefinitionOrSelf() const
Definition Decl.h:4531
field_iterator field_begin() const
Definition Decl.cpp:5270
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4615
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
bool isUnion() const
Definition Decl.h:3946
uint64_t getPointerWidth(LangAS AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition TargetInfo.h:490
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8776
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isArrayType() const
Definition TypeBase.h:8772
bool isPointerType() const
Definition TypeBase.h:8673
bool isReferenceType() const
Definition TypeBase.h:8697
bool isScalarType() const
Definition TypeBase.h:9145
bool isVariableArrayType() const
Definition TypeBase.h:8784
bool isCUDADeviceBuiltinSurfaceType() const
Check if the type is the CUDA device builtin surface type.
Definition Type.cpp:5459
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition TypeBase.h:9161
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8808
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2310
bool isMemberPointerType() const
Definition TypeBase.h:8754
bool isCUDADeviceBuiltinTextureType() const
Check if the type is the CUDA device builtin texture type.
Definition Type.cpp:5468
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
Definition Type.cpp:2397
bool isVectorType() const
Definition TypeBase.h:8812
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2405
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
Definition TypeBase.h:2976
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9266
bool isNullPtrType() const
Definition TypeBase.h:9076
bool isRecordType() const
Definition TypeBase.h:8800
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
Expr * getSubExpr() const
Definition Expr.h:2288
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:924
Represents a GCC generic vector type.
Definition TypeBase.h:4230
Definition SPIR.cpp:35
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< AtomicType > atomicType
@ Address
A pointer to a ValueDecl.
Definition Primitives.h:28
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1471
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
@ Result
The result type of a method or function.
Definition TypeBase.h:905
LangAS
Defines the address space values used by the address space qualifier of QualType.
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * CharTy
char