CIRGenExpr.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "Address.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/CharUnits.h"
22#include "clang/AST/Decl.h"
23#include "clang/AST/Expr.h"
24#include "clang/AST/ExprCXX.h"
27#include <optional>
28
29using namespace clang;
30using namespace clang::CIRGen;
31using namespace cir;
32
33/// Get the address of a zero-sized field within a record. The resulting address
34/// doesn't necessarily have the right type.
35Address CIRGenFunction::emitAddrOfFieldStorage(Address base,
36 const FieldDecl *field,
37 llvm::StringRef fieldName,
38 unsigned fieldIndex) {
39 if (field->isZeroSize(getContext())) {
40 cgm.errorNYI(field->getSourceRange(),
41 "emitAddrOfFieldStorage: zero-sized field");
42 return Address::invalid();
43 }
44
45 mlir::Location loc = getLoc(field->getLocation());
46
47 mlir::Type fieldType = convertType(field->getType());
48 auto fieldPtr = cir::PointerType::get(fieldType);
49 // In most cases fieldName is the same as field->getName(), but lambdas do
50 // not currently carry the name, so it can be passed down from the
51 // CaptureStmt.
52 cir::GetMemberOp memberAddr = builder.createGetMember(
53 loc, fieldPtr, base.getPointer(), fieldName, fieldIndex);
54
55 // Retrieve layout information, compute alignment and return the final
56 // address.
57 const RecordDecl *rec = field->getParent();
58 const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
59 unsigned idx = layout.getCIRFieldNo(field);
60 CharUnits offset = CharUnits::fromQuantity(
61 layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
62 return Address(memberAddr, base.getAlignment().alignmentAtOffset(offset));
63}
64
65/// Given an expression of pointer type, try to
66/// derive a more accurate bound on the alignment of the pointer.
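// Editor's note (illustrative example; not part of the upstream source):
// given
//   void g(int *p) { *p = 0; }
// only the pointer's type is known at the store through `*p`, so the
// alignment is taken from the pointee type (alignof(int) here). The cast,
// array-decay, and unary `&` cases below refine that estimate when a more
// precise source of alignment is available.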
67Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
68 LValueBaseInfo *baseInfo) {
69 // We allow this with ObjC object pointers because of fragile ABIs.
70 assert(expr->getType()->isPointerType() ||
71 expr->getType()->isObjCObjectPointerType());
72 expr = expr->IgnoreParens();
73
74 // Casts:
75 if (auto const *ce = dyn_cast<CastExpr>(expr)) {
76 if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
77 cgm.emitExplicitCastExprType(ece);
78
79 switch (ce->getCastKind()) {
80 // Non-converting casts (but not C's implicit conversion from void*).
81 case CK_BitCast:
82 case CK_NoOp:
83 case CK_AddressSpaceConversion: {
84 if (const auto *ptrTy =
85 ce->getSubExpr()->getType()->getAs<PointerType>()) {
86 if (ptrTy->getPointeeType()->isVoidType())
87 break;
88
89 LValueBaseInfo innerBaseInfo;
91 Address addr =
92 emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
93 if (baseInfo)
94 *baseInfo = innerBaseInfo;
95
96 if (isa<ExplicitCastExpr>(ce)) {
97 LValueBaseInfo targetTypeBaseInfo;
98
99 const QualType pointeeType = expr->getType()->getPointeeType();
100 const CharUnits align =
101 cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);
102
103 // If the source l-value is opaque, honor the alignment of the
104 // casted-to type.
105 if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
106 if (baseInfo)
107 baseInfo->mergeForCast(targetTypeBaseInfo);
108 addr = Address(addr.getPointer(), addr.getElementType(), align);
109 }
110 }
111
113
114 const mlir::Type eltTy =
115 convertTypeForMem(expr->getType()->getPointeeType());
116 addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
117 addr, eltTy);
119
120 return addr;
121 }
122 break;
123 }
124
125 // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
126 case CK_ArrayToPointerDecay:
127 return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);
128
129 case CK_UncheckedDerivedToBase:
130 case CK_DerivedToBase: {
133 Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
134 const CXXRecordDecl *derived =
135 ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
136 return getAddressOfBaseClass(addr, derived, ce->path(),
138 ce->getExprLoc());
139 }
140
141 case CK_AnyPointerToBlockPointerCast:
142 case CK_BaseToDerived:
143 case CK_BaseToDerivedMemberPointer:
144 case CK_BlockPointerToObjCPointerCast:
145 case CK_BuiltinFnToFnPtr:
146 case CK_CPointerToObjCPointerCast:
147 case CK_DerivedToBaseMemberPointer:
148 case CK_Dynamic:
149 case CK_FunctionToPointerDecay:
150 case CK_IntegralToPointer:
151 case CK_LValueToRValue:
152 case CK_LValueToRValueBitCast:
153 case CK_NullToMemberPointer:
154 case CK_NullToPointer:
155 case CK_ReinterpretMemberPointer:
156 // Common pointer conversions, nothing to do here.
157 // TODO: Is there any reason to treat base-to-derived conversions
158 // specially?
159 break;
160
161 case CK_ARCConsumeObject:
162 case CK_ARCExtendBlockObject:
163 case CK_ARCProduceObject:
164 case CK_ARCReclaimReturnedObject:
165 case CK_AtomicToNonAtomic:
166 case CK_BooleanToSignedIntegral:
167 case CK_ConstructorConversion:
168 case CK_CopyAndAutoreleaseBlockObject:
169 case CK_Dependent:
170 case CK_FixedPointCast:
171 case CK_FixedPointToBoolean:
172 case CK_FixedPointToFloating:
173 case CK_FixedPointToIntegral:
174 case CK_FloatingCast:
175 case CK_FloatingComplexCast:
176 case CK_FloatingComplexToBoolean:
177 case CK_FloatingComplexToIntegralComplex:
178 case CK_FloatingComplexToReal:
179 case CK_FloatingRealToComplex:
180 case CK_FloatingToBoolean:
181 case CK_FloatingToFixedPoint:
182 case CK_FloatingToIntegral:
183 case CK_HLSLAggregateSplatCast:
184 case CK_HLSLArrayRValue:
185 case CK_HLSLElementwiseCast:
186 case CK_HLSLVectorTruncation:
187 case CK_IntToOCLSampler:
188 case CK_IntegralCast:
189 case CK_IntegralComplexCast:
190 case CK_IntegralComplexToBoolean:
191 case CK_IntegralComplexToFloatingComplex:
192 case CK_IntegralComplexToReal:
193 case CK_IntegralRealToComplex:
194 case CK_IntegralToBoolean:
195 case CK_IntegralToFixedPoint:
196 case CK_IntegralToFloating:
197 case CK_LValueBitCast:
198 case CK_MatrixCast:
199 case CK_MemberPointerToBoolean:
200 case CK_NonAtomicToAtomic:
201 case CK_ObjCObjectLValueCast:
202 case CK_PointerToBoolean:
203 case CK_PointerToIntegral:
204 case CK_ToUnion:
205 case CK_ToVoid:
206 case CK_UserDefinedConversion:
207 case CK_VectorSplat:
208 case CK_ZeroToOCLOpaqueType:
209 llvm_unreachable("unexpected cast for emitPointerWithAlignment");
210 }
211 }
212
213 // Unary &
214 if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
215 // TODO(cir): maybe we should use cir.unary for pointers here instead.
216 if (uo->getOpcode() == UO_AddrOf) {
217 LValue lv = emitLValue(uo->getSubExpr());
218 if (baseInfo)
219 *baseInfo = lv.getBaseInfo();
221 return lv.getAddress();
222 }
223 }
224
225 // std::addressof and variants.
226 if (auto const *call = dyn_cast<CallExpr>(expr)) {
227 switch (call->getBuiltinCallee()) {
228 default:
229 break;
230 case Builtin::BIaddressof:
231 case Builtin::BI__addressof:
232 case Builtin::BI__builtin_addressof: {
233 cgm.errorNYI(expr->getSourceRange(),
234 "emitPointerWithAlignment: builtin addressof");
235 return Address::invalid();
236 }
237 }
238 }
239
240 // Otherwise, use the alignment of the type.
242 emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
243 /*forPointeeType=*/true, baseInfo);
244}
245
246void CIRGenFunction::emitStoreThroughLValue(RValue src, LValue dst,
247 bool isInit) {
248 if (!dst.isSimple()) {
249 if (dst.isVectorElt()) {
250 // Read/modify/write the vector, inserting the new element
251 const mlir::Location loc = dst.getVectorPointer().getLoc();
252 const mlir::Value vector =
253 builder.createLoad(loc, dst.getVectorAddress());
254 const mlir::Value newVector = cir::VecInsertOp::create(
255 builder, loc, vector, src.getValue(), dst.getVectorIdx());
256 builder.createStore(loc, newVector, dst.getVectorAddress());
257 return;
258 }
259
260 assert(dst.isBitField() && "Unknown LValue type");
262 return;
263
264 cgm.errorNYI(dst.getPointer().getLoc(),
265 "emitStoreThroughLValue: non-simple lvalue");
266 return;
267 }
268
270
271 assert(src.isScalar() && "Can't emit an aggregate store with this method");
272 emitStoreOfScalar(src.getValue(), dst, isInit);
273}
274
275static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
276 const VarDecl *vd) {
277 QualType t = e->getType();
278
279 // If it's thread_local, emit a call to its wrapper function instead.
281 if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
282 cgf.cgm.errorNYI(e->getSourceRange(),
283 "emitGlobalVarDeclLValue: thread_local variable");
284
285 // Check if the variable is marked as declare target with link clause in
286 // device codegen.
287 if (cgf.getLangOpts().OpenMP)
288 cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");
289
290 // Traditional LLVM codegen handles thread-local variables separately; CIR
291 // handles them as part of getAddrOfGlobalVar.
292 mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);
293
295 mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
296 cir::PointerType realPtrTy = cgf.getBuilder().getPointerTo(realVarTy);
297 if (realPtrTy != v.getType())
298 v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);
299
300 CharUnits alignment = cgf.getContext().getDeclAlign(vd);
301 Address addr(v, realVarTy, alignment);
302 LValue lv;
303 if (vd->getType()->isReferenceType())
304 cgf.cgm.errorNYI(e->getSourceRange(),
305 "emitGlobalVarDeclLValue: reference type");
306 else
307 lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
309 return lv;
310}
311
312void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
313 bool isVolatile, QualType ty,
314 bool isInit, bool isNontemporal) {
316
317 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
318 // Boolean vectors use `iN` as storage type.
319 if (clangVecTy->isExtVectorBoolType())
320 cgm.errorNYI(addr.getPointer().getLoc(),
321 "emitStoreOfScalar ExtVectorBoolType");
322
323 // Handle vectors of size 3 like size 4 for better performance.
324 const mlir::Type elementType = addr.getElementType();
325 const auto vecTy = cast<cir::VectorType>(elementType);
326
327 // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it is upstreamed.
329 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
330 cgm.errorNYI(addr.getPointer().getLoc(),
331 "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
332 }
333
334 value = emitToMemory(value, ty);
335
337
338 // Update the alloca with more info on initialization.
339 assert(addr.getPointer() && "expected pointer to exist");
340 auto srcAlloca = addr.getDefiningOp<cir::AllocaOp>();
341 if (currVarDecl && srcAlloca) {
342 const VarDecl *vd = currVarDecl;
343 assert(vd && "VarDecl expected");
344 if (vd->hasInit())
345 srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
346 }
347
348 assert(currSrcLoc && "must pass in source location");
349 builder.createStore(*currSrcLoc, value, addr, isVolatile);
350
351 if (isNontemporal) {
352 cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
353 return;
354 }
355
357}
358
359// TODO: Replace this with a proper TargetInfo function call.
360/// Helper method to check if the underlying ABI is AAPCS
361static bool isAAPCS(const TargetInfo &targetInfo) {
362 return targetInfo.getABI().starts_with("aapcs");
363}
364
365mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src,
366 LValue dst) {
367
368 const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
369 mlir::Type resLTy = convertTypeForMem(dst.getType());
370 Address ptr = dst.getBitFieldAddress();
371
372 bool useVolatile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
373 dst.isVolatileQualified() &&
374 info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());
375
376 mlir::Value dstAddr = dst.getAddress().getPointer();
377
378 return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr,
379 ptr.getElementType(), src.getValue(), info,
380 dst.isVolatileQualified(), useVolatile);
381}
382
383RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
384 const CIRGenBitFieldInfo &info = lv.getBitFieldInfo();
385
386 // Get the output type.
387 mlir::Type resLTy = convertType(lv.getType());
388 Address ptr = lv.getBitFieldAddress();
389
390 bool useVolatile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
391 isAAPCS(cgm.getTarget());
392
393 mlir::Value field =
394 builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
395 info, lv.isVolatile(), useVolatile);
397 return RValue::get(field);
398}
399
400Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
401 const FieldDecl *field,
402 mlir::Type fieldType,
403 unsigned index) {
404 mlir::Location loc = getLoc(field->getLocation());
405 cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
407 cir::GetMemberOp sea = getBuilder().createGetMember(
408 loc, fieldPtr, base.getPointer(), field->getName(),
409 rec.isUnion() ? field->getFieldIndex() : index);
410 CharUnits offset = CharUnits::fromQuantity(
411 rec.getElementOffset(cgm.getDataLayout().layout, index));
412 return Address(sea, base.getAlignment().alignmentAtOffset(offset));
413}
414
415LValue CIRGenFunction::emitLValueForBitField(LValue base,
416 const FieldDecl *field) {
417 LValueBaseInfo baseInfo = base.getBaseInfo();
418 const CIRGenRecordLayout &layout =
419 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
420 const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
421
423
424 unsigned idx = layout.getCIRFieldNo(field);
425 Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);
426
427 mlir::Location loc = getLoc(field->getLocation());
428 if (addr.getElementType() != info.storageType)
429 addr = builder.createElementBitCast(loc, addr, info.storageType);
430
431 QualType fieldType =
433 // TODO(cir): Support TBAA for bit fields.
435 LValueBaseInfo fieldBaseInfo(baseInfo.getAlignmentSource());
436 return LValue::makeBitfield(addr, info, fieldType, fieldBaseInfo);
437}
438
439LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) {
440 LValueBaseInfo baseInfo = base.getBaseInfo();
441
442 if (field->isBitField())
443 return emitLValueForBitField(base, field);
444
445 QualType fieldType = field->getType();
446 const RecordDecl *rec = field->getParent();
447 AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
448 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));
450
451 Address addr = base.getAddress();
452 if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
453 if (cgm.getCodeGenOpts().StrictVTablePointers &&
454 classDecl->isDynamicClass()) {
455 cgm.errorNYI(field->getSourceRange(),
456 "emitLValueForField: strict vtable for dynamic class");
457 }
458 }
459
460 unsigned recordCVR = base.getVRQualifiers();
461
462 llvm::StringRef fieldName = field->getName();
463 unsigned fieldIndex;
464 if (cgm.lambdaFieldToName.count(field))
465 fieldName = cgm.lambdaFieldToName[field];
466
467 if (rec->isUnion())
468 fieldIndex = field->getFieldIndex();
469 else {
470 const CIRGenRecordLayout &layout =
471 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
472 fieldIndex = layout.getCIRFieldNo(field);
473 }
474
475 addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);
477
478 // If this is a reference field, load the reference right now.
479 if (fieldType->isReferenceType()) {
481 LValue refLVal = makeAddrLValue(addr, fieldType, fieldBaseInfo);
482 if (recordCVR & Qualifiers::Volatile)
483 refLVal.getQuals().addVolatile();
484 addr = emitLoadOfReference(refLVal, getLoc(field->getSourceRange()),
485 &fieldBaseInfo);
486
487 // Qualifiers on the struct don't apply to the referencee.
488 recordCVR = 0;
489 fieldType = fieldType->getPointeeType();
490 }
491
492 if (field->hasAttr<AnnotateAttr>()) {
493 cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
494 return LValue();
495 }
496
497 LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
498 lv.getQuals().addCVRQualifiers(recordCVR);
499
500 // __weak attribute on a field is ignored.
502 cgm.errorNYI(field->getSourceRange(),
503 "emitLValueForField: __weak attribute");
504 return LValue();
505 }
506
507 return lv;
508}
509
510LValue CIRGenFunction::emitLValueForFieldInitialization(
511 LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName) {
512 QualType fieldType = field->getType();
513
514 if (!fieldType->isReferenceType())
515 return emitLValueForField(base, field);
516
517 const CIRGenRecordLayout &layout =
518 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
519 unsigned fieldIndex = layout.getCIRFieldNo(field);
520
521 Address v =
522 emitAddrOfFieldStorage(base.getAddress(), field, fieldName, fieldIndex);
523
524 // Make sure that the address is pointing to the right type.
525 mlir::Type memTy = convertTypeForMem(fieldType);
526 v = builder.createElementBitCast(getLoc(field->getSourceRange()), v, memTy);
527
528 // TODO: Generate TBAA information that describes this access as a structure
529 // member access and not just an access to an object of the field's type. This
530 // should be similar to what we do in EmitLValueForField().
531 LValueBaseInfo baseInfo = base.getBaseInfo();
532 AlignmentSource fieldAlignSource = baseInfo.getAlignmentSource();
533 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(fieldAlignSource));
535 return makeAddrLValue(v, fieldType, fieldBaseInfo);
536}
537
538mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
539 // Bool has a different representation in memory than in registers,
540 // but in ClangIR, it is simply represented as a cir.bool value.
541 // This function is here as a placeholder for possible future changes.
542 return value;
543}
544
545void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
546 bool isInit) {
547 if (lvalue.getType()->isConstantMatrixType()) {
548 assert(0 && "NYI: emitStoreOfScalar constant matrix type");
549 return;
550 }
551
552 emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
553 lvalue.getType(), isInit, /*isNontemporal=*/false);
554}
555
556mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
557 QualType ty, SourceLocation loc,
558 LValueBaseInfo baseInfo) {
560 mlir::Type eltTy = addr.getElementType();
561
562 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
563 if (clangVecTy->isExtVectorBoolType()) {
564 cgm.errorNYI(loc, "emitLoadOfScalar: ExtVectorBoolType");
565 return nullptr;
566 }
567
568 const auto vecTy = cast<cir::VectorType>(eltTy);
569
570 // Handle vectors of size 3 like size 4 for better performance.
572 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
573 cgm.errorNYI(addr.getPointer().getLoc(),
574 "emitLoadOfScalar Vec3 & PreserveVec3Type disabled");
575 }
576
578 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
579 if (ty->isAtomicType() || isLValueSuitableForInlineAtomic(atomicLValue))
580 cgm.errorNYI("emitLoadOfScalar: load atomic");
581
582 if (mlir::isa<cir::VoidType>(eltTy))
583 cgm.errorNYI(loc, "emitLoadOfScalar: void type");
584
586
587 mlir::Value loadOp = builder.createLoad(getLoc(loc), addr, isVolatile);
588 if (!ty->isBooleanType() && ty->hasBooleanRepresentation())
589 cgm.errorNYI("emitLoadOfScalar: boolean type with boolean representation");
590
591 return loadOp;
592}
593
594mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue,
595 SourceLocation loc) {
598 return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
599 lvalue.getType(), loc, lvalue.getBaseInfo());
600}
601
602/// Given an expression that represents a value lvalue, this
603/// method emits the address of the lvalue, then loads the result as an rvalue,
604/// returning the rvalue.
605RValue CIRGenFunction::emitLoadOfLValue(LValue lv, SourceLocation loc) {
606 assert(!lv.getType()->isFunctionType());
607 assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");
608
609 if (lv.isBitField())
610 return emitLoadOfBitfieldLValue(lv, loc);
611
612 if (lv.isSimple())
613 return RValue::get(emitLoadOfScalar(lv, loc));
614
615 if (lv.isVectorElt()) {
616 const mlir::Value load =
617 builder.createLoad(getLoc(loc), lv.getVectorAddress());
618 return RValue::get(cir::VecExtractOp::create(builder, getLoc(loc), load,
619 lv.getVectorIdx()));
620 }
621
622 cgm.errorNYI(loc, "emitLoadOfLValue");
623 return RValue::get(nullptr);
624}
625
626static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
628 return cgm.getAddrOfFunction(gd);
629}
630
631static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd,
632 mlir::Value thisValue) {
633 return cgf.emitLValueForLambdaField(fd, thisValue);
634}
635
636/// Given that we are currently emitting a lambda, emit an l-value for
637/// one of its members.
638///
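// Editor's note (illustrative example; not part of the upstream source):
// for
//   int n = 1;
//   auto l = [n] { return n; };
// the lambda's call operator reads `n` through an unnamed field of the
// closure type; this helper forms that field access from the lambda's
// `this` value.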
639LValue CIRGenFunction::emitLValueForLambdaField(const FieldDecl *field,
640 mlir::Value thisValue) {
641 bool hasExplicitObjectParameter = false;
642 const auto *methD = dyn_cast_if_present<CXXMethodDecl>(curCodeDecl);
643 LValue lambdaLV;
644 if (methD) {
645 hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction();
646 assert(methD->getParent()->isLambda());
647 assert(methD->getParent() == field->getParent());
648 }
649 if (hasExplicitObjectParameter) {
650 cgm.errorNYI(field->getSourceRange(), "ExplicitObjectMemberFunction");
651 } else {
652 QualType lambdaTagType =
654 lambdaLV = makeNaturalAlignAddrLValue(thisValue, lambdaTagType);
655 }
656 return emitLValueForField(lambdaLV, field);
657}
658
662
663static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
664 GlobalDecl gd) {
665 const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
666 cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
667 mlir::Location loc = cgf.getLoc(e->getSourceRange());
668 CharUnits align = cgf.getContext().getDeclAlign(fd);
669
671
672 mlir::Type fnTy = funcOp.getFunctionType();
673 mlir::Type ptrTy = cir::PointerType::get(fnTy);
674 mlir::Value addr = cir::GetGlobalOp::create(cgf.getBuilder(), loc, ptrTy,
675 funcOp.getSymName());
676
677 if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
678 fnTy = cgf.convertType(fd->getType());
679 ptrTy = cir::PointerType::get(fnTy);
680
681 addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
682 cir::CastKind::bitcast, addr);
683 }
684
685 return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
687}
688
689/// Determine whether we can emit a reference to \p vd from the current
690/// context, despite not necessarily having seen an odr-use of the variable in
691/// this context.
692/// TODO(cir): This could be shared with classic codegen.
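// Editor's note (illustrative example; not part of the upstream source):
// a typical non-odr-use is
//   constexpr int k = 4;
//   int f() { return k; }   // k's value is folded; no load of k is needed
// Emitting a reference to `k` anyway is only safe when a definition of `k`
// is guaranteed to be emitted and retained until link time, which is what
// the linkage check below verifies.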
693static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf,
694 const DeclRefExpr *e,
695 const VarDecl *vd) {
696 // For a variable declared in an enclosing scope, do not emit a spurious
697 // reference even if we have a capture, as that will emit an unwarranted
698 // reference to our capture state, and will likely generate worse code than
699 // emitting a local copy.
701 return false;
702
703 // For a local declaration declared in this function, we can always reference
704 // it even if we don't have an odr-use.
705 if (vd->hasLocalStorage()) {
706 return vd->getDeclContext() ==
707 dyn_cast_or_null<DeclContext>(cgf.curCodeDecl);
708 }
709
710 // For a global declaration, we can emit a reference to it if we know
711 // for sure that we are able to emit a definition of it.
712 vd = vd->getDefinition(cgf.getContext());
713 if (!vd)
714 return false;
715
716 // Don't emit a spurious reference if it might be to a variable that only
717 // exists on a different device / target.
718 // FIXME: This is unnecessarily broad. Check whether this would actually be a
719 // cross-target reference.
720 if (cgf.getLangOpts().OpenMP || cgf.getLangOpts().CUDA ||
721 cgf.getLangOpts().OpenCL) {
722 return false;
723 }
724
725 // We can emit a spurious reference only if the linkage implies that we'll
726 // be emitting a non-interposable symbol that will be retained until link
727 // time.
728 switch (cgf.cgm.getCIRLinkageVarDefinition(vd, /*IsConstant=*/false)) {
729 case cir::GlobalLinkageKind::ExternalLinkage:
730 case cir::GlobalLinkageKind::LinkOnceODRLinkage:
731 case cir::GlobalLinkageKind::WeakODRLinkage:
732 case cir::GlobalLinkageKind::InternalLinkage:
733 case cir::GlobalLinkageKind::PrivateLinkage:
734 return true;
735 default:
736 return false;
737 }
738}
739
740LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
741 const NamedDecl *nd = e->getDecl();
742 QualType ty = e->getType();
743
744 assert(e->isNonOdrUse() != NOUR_Unevaluated &&
745 "should not emit an unevaluated operand");
746
747 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
748 // Global Named registers access via intrinsics only
749 if (vd->getStorageClass() == SC_Register && vd->hasAttr<AsmLabelAttr>() &&
750 !vd->isLocalVarDecl()) {
751 cgm.errorNYI(e->getSourceRange(),
752 "emitDeclRefLValue: Global Named registers access");
753 return LValue();
754 }
755
756 if (e->isNonOdrUse() == NOUR_Constant &&
757 (vd->getType()->isReferenceType() ||
758 !canEmitSpuriousReferenceToVariable(*this, e, vd))) {
759 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: NonOdrUse");
760 return LValue();
761 }
762
763 // Check for captured variables.
765 vd = vd->getCanonicalDecl();
766 if (FieldDecl *fd = lambdaCaptureFields.lookup(vd))
767 return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
770 }
771 }
772
773 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
774 // Checks for omitted feature handling
781
782 // Check if this is a global variable
783 if (vd->hasLinkage() || vd->isStaticDataMember())
784 return emitGlobalVarDeclLValue(*this, e, vd);
785
786 Address addr = Address::invalid();
787
788 // The variable should generally be present in the local decl map.
789 auto iter = localDeclMap.find(vd);
790 if (iter != localDeclMap.end()) {
791 addr = iter->second;
792 } else {
793 // Otherwise, it might be static local we haven't emitted yet for some
794 // reason; most likely, because it's in an outer function.
795 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: static local");
796 }
797
798 // Drill into reference types.
799 LValue lv =
800 vd->getType()->isReferenceType()
804
805 // Statics are defined as globals, so they are not included in the function's
806 // symbol table.
807 assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
808 "non-static locals should be already mapped");
809
810 return lv;
811 }
812
813 if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
816 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
817 return LValue();
818 }
819 return emitLValue(bd->getBinding());
820 }
821
822 if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
823 LValue lv = emitFunctionDeclLValue(*this, e, fd);
824
825 // Emit debuginfo for the function declaration if the target wants to.
826 if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
828
829 return lv;
830 }
831
832 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
833 return LValue();
834}
835
836mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *e) {
837 QualType boolTy = getContext().BoolTy;
838 SourceLocation loc = e->getExprLoc();
839
841 if (e->getType()->getAs<MemberPointerType>()) {
842 cgm.errorNYI(e->getSourceRange(),
843 "evaluateExprAsBool: member pointer type");
844 return createDummyValue(getLoc(loc), boolTy);
845 }
846
848 if (!e->getType()->isAnyComplexType())
849 return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);
850
852 loc);
853}
854
855LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) {
856 UnaryOperatorKind op = e->getOpcode();
857
858 // __extension__ doesn't affect lvalue-ness.
859 if (op == UO_Extension)
860 return emitLValue(e->getSubExpr());
861
862 switch (op) {
863 case UO_Deref: {
865 assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
866
868 LValueBaseInfo baseInfo;
869 Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);
870
871 // Tag 'load' with deref attribute.
872 // FIXME: This misses some dereference cases and has problematic interactions
873 // with other operators.
874 if (auto loadOp = addr.getDefiningOp<cir::LoadOp>())
875 loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));
876
877 LValue lv = makeAddrLValue(addr, t, baseInfo);
880 return lv;
881 }
882 case UO_Real:
883 case UO_Imag: {
884 LValue lv = emitLValue(e->getSubExpr());
885 assert(lv.isSimple() && "real/imag on non-ordinary l-value");
886
887 // __real is valid on scalars. This is a faster way of testing that.
888 // __imag can only produce an rvalue on scalars.
889 if (e->getOpcode() == UO_Real &&
890 !mlir::isa<cir::ComplexType>(lv.getAddress().getElementType())) {
891 assert(e->getSubExpr()->getType()->isArithmeticType());
892 return lv;
893 }
894
896 QualType elemTy = exprTy->castAs<clang::ComplexType>()->getElementType();
897 mlir::Location loc = getLoc(e->getExprLoc());
898 Address component =
899 e->getOpcode() == UO_Real
900 ? builder.createComplexRealPtr(loc, lv.getAddress())
901 : builder.createComplexImagPtr(loc, lv.getAddress());
903 LValue elemLV = makeAddrLValue(component, elemTy);
904 elemLV.getQuals().addQualifiers(lv.getQuals());
905 return elemLV;
906 }
907 case UO_PreInc:
908 case UO_PreDec: {
909 cir::UnaryOpKind kind =
910 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
911 LValue lv = emitLValue(e->getSubExpr());
912
913 assert(e->isPrefix() && "Prefix operator in unexpected state!");
914
915 if (e->getType()->isAnyComplexType()) {
916 emitComplexPrePostIncDec(e, lv, kind, /*isPre=*/true);
917 } else {
918 emitScalarPrePostIncDec(e, lv, kind, /*isPre=*/true);
919 }
920
921 return lv;
922 }
923 case UO_Extension:
924 llvm_unreachable("UnaryOperator extension should be handled above!");
925 case UO_Plus:
926 case UO_Minus:
927 case UO_Not:
928 case UO_LNot:
929 case UO_AddrOf:
930 case UO_PostInc:
931 case UO_PostDec:
932 case UO_Coawait:
933 llvm_unreachable("UnaryOperator of non-lvalue kind!");
934 }
935 llvm_unreachable("Unknown unary operator kind!");
936}
937
938/// If the specified expr is a simple decay from an array to pointer,
939/// return the array subexpression.
940/// FIXME: this could be abstracted into a common AST helper.
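// Editor's note (illustrative example; not part of the upstream source):
// for
//   int a[8];
//   int f(int i) { return a[i]; }
// the base of the subscript is an implicit array-to-pointer decay of `a`;
// this helper returns the expression for `a` itself so the array's known
// alignment can be propagated to the element access.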
941static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
942 // If this isn't just an array->pointer decay, bail out.
943 const auto *castExpr = dyn_cast<CastExpr>(e);
944 if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
945 return nullptr;
946
947 // If this is a decay from variable width array, bail out.
948 const Expr *subExpr = castExpr->getSubExpr();
949 if (subExpr->getType()->isVariableArrayType())
950 return nullptr;
951
952 return subExpr;
953}
954
955static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
956 // TODO(cir): should we consider using MLIR's IndexType instead of IntegerAttr?
957 if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
958 return constantOp.getValueAttr<cir::IntAttr>();
959 return {};
960}
961
962static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
963 CharUnits eltSize) {
964 // If we have a constant index, we can use the exact offset of the
965 // element we're accessing.
966 if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
967 const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
968 return arrayAlign.alignmentAtOffset(offset);
969 }
970 // Otherwise, use the worst-case alignment for any element.
971 return arrayAlign.alignmentOfArrayElement(eltSize);
972}
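// Editor's note (illustrative example; not part of the upstream source):
// for a 16-byte-aligned array of 4-byte elements, a constant index of 2
// yields an offset of 8 bytes, so the element is known to be 8-byte
// aligned; with a runtime index only the worst case (4-byte alignment)
// can be assumed.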
973
974static QualType getFixedSizeElementType(const ASTContext &astContext,
975 const VariableArrayType *vla) {
976 QualType eltType;
977 do {
978 eltType = vla->getElementType();
979 } while ((vla = astContext.getAsVariableArrayType(eltType)));
980 return eltType;
981}
982
983static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf,
984 mlir::Location beginLoc,
985 mlir::Location endLoc, mlir::Value ptr,
986 mlir::Type eltTy, mlir::Value idx,
987 bool shouldDecay) {
988 CIRGenModule &cgm = cgf.getCIRGenModule();
989 // TODO(cir): LLVM codegen emits an inbounds GEP check here; is there anything
990 // that would enhance tracking this later in CIR?
992 return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
993 shouldDecay);
994}
995
996static Address emitArraySubscriptPtr(CIRGenFunction &cgf,
997 mlir::Location beginLoc,
998 mlir::Location endLoc, Address addr,
999 QualType eltType, mlir::Value idx,
1000 mlir::Location loc, bool shouldDecay) {
1001
1002 // Determine the element size of the statically-sized base. This is
1003 // the thing that the indices are expressed in terms of.
1004 if (const VariableArrayType *vla =
1005 cgf.getContext().getAsVariableArrayType(eltType)) {
1006 eltType = getFixedSizeElementType(cgf.getContext(), vla);
1007 }
1008
1009 // We can use that to compute the best alignment of the element.
1010 const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
1011 const CharUnits eltAlign =
1012 getArrayElementAlign(addr.getAlignment(), idx, eltSize);
1013
1015 const mlir::Value eltPtr =
1016 emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
1017 addr.getElementType(), idx, shouldDecay);
1018 const mlir::Type elementType = cgf.convertTypeForMem(eltType);
1019 return Address(eltPtr, elementType, eltAlign);
1020}
1021
1022LValue
1023CIRGenFunction::emitArraySubscriptExpr(const ArraySubscriptExpr *e) {
1024  if (isa<ExtVectorElementExpr>(e->getBase())) {
1025    cgm.errorNYI(e->getSourceRange(),
1026                 "emitArraySubscriptExpr: ExtVectorElementExpr");
1027    return LValue();
1028  }
1029
1030 if (getContext().getAsVariableArrayType(e->getType())) {
1031 cgm.errorNYI(e->getSourceRange(),
1032 "emitArraySubscriptExpr: VariableArrayType");
1033    return LValue();
1034 }
1035
1036 if (e->getType()->getAs<ObjCObjectType>()) {
1037 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
1038    return LValue();
1039 }
1040
1041 // The index must always be an integer, which is not an aggregate. Emit it
1042 // in lexical order (this complexity is, sadly, required by C++17).
1043 assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
1044 "index was neither LHS nor RHS");
1045
1046 auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
1047 const mlir::Value idx = emitScalarExpr(e->getIdx());
1048
1049 // Extend or truncate the index type to 32 or 64-bits.
1050 auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
1051 if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
1052 cgm.errorNYI(e->getSourceRange(),
1053 "emitArraySubscriptExpr: index type cast");
1054 return idx;
1055 };
1056
1057 // If the base is a vector type, then we are forming a vector element
1058 // with this subscript.
1059 if (e->getBase()->getType()->isVectorType() &&
1061 const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
1062 const LValue lhs = emitLValue(e->getBase());
1063 return LValue::makeVectorElt(lhs.getAddress(), idx, e->getBase()->getType(),
1064 lhs.getBaseInfo());
1065 }
1066
1067 const mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
1068 if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
1069 LValue arrayLV;
1070 if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
1071 arrayLV = emitArraySubscriptExpr(ase);
1072 else
1073 arrayLV = emitLValue(array);
1074
1075 // Propagate the alignment from the array itself to the result.
1076 const Address addr = emitArraySubscriptPtr(
1077 *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
1078 arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1079 /*shouldDecay=*/true);
1080
1081 const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
1082
1083 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1084 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1085 }
1086
1087 return lv;
1088 }
1089
1090 // The base must be a pointer; emit it with an estimate of its alignment.
1091 assert(e->getBase()->getType()->isPointerType() &&
1092 "The base must be a pointer");
1093
1094 LValueBaseInfo eltBaseInfo;
1095 const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
1096 // Propagate the alignment from the array itself to the result.
1097 const Address addr = emitArraySubscriptPtr(
1098 *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
1099 e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1100 /*shouldDecay=*/false);
1101
1102 const LValue lv = LValue::makeAddr(addr, e->getType(), eltBaseInfo);
1103
1104 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1105 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1106 }
1107
1108 return lv;
1109}
1110
1112 llvm::StringRef name) {
1113 cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e, name);
1114 assert(globalOp.getAlignment() && "expected alignment for string literal");
1115 unsigned align = *(globalOp.getAlignment());
1116 mlir::Value addr =
1117 builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
1118 return makeAddrLValue(
1119 Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
1121}
1122
1123/// Casts are never lvalues unless that cast is to a reference type. If the cast
1124/// is to a reference, we can have the usual lvalue result, otherwise if a cast
1125/// is needed by the code generator in an lvalue context, then it must mean that
1126/// we need the address of an aggregate in order to access one of its members.
1127/// This can happen for all the reasons that casts are permitted with aggregate
1128/// result, including noop aggregate casts, and cast from scalar to union.
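// Editor's note (illustrative example; not part of the upstream source):
// casts that reach this function include
//   struct B {}; struct D : B {};
//   D d; B &b = d;            // CK_DerivedToBase on an lvalue
// and reinterpretations such as (int &)someFloat (CK_LValueBitCast), where
// the result of the cast is itself used as an lvalue.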
1129LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
1130 switch (e->getCastKind()) {
1131 case CK_ToVoid:
1132 case CK_BitCast:
1133 case CK_LValueToRValueBitCast:
1134 case CK_ArrayToPointerDecay:
1135 case CK_FunctionToPointerDecay:
1136 case CK_NullToMemberPointer:
1137 case CK_NullToPointer:
1138 case CK_IntegralToPointer:
1139 case CK_PointerToIntegral:
1140 case CK_PointerToBoolean:
1141 case CK_IntegralCast:
1142 case CK_BooleanToSignedIntegral:
1143 case CK_IntegralToBoolean:
1144 case CK_IntegralToFloating:
1145 case CK_FloatingToIntegral:
1146 case CK_FloatingToBoolean:
1147 case CK_FloatingCast:
1148 case CK_FloatingRealToComplex:
1149 case CK_FloatingComplexToReal:
1150 case CK_FloatingComplexToBoolean:
1151 case CK_FloatingComplexCast:
1152 case CK_FloatingComplexToIntegralComplex:
1153 case CK_IntegralRealToComplex:
1154 case CK_IntegralComplexToReal:
1155 case CK_IntegralComplexToBoolean:
1156 case CK_IntegralComplexCast:
1157 case CK_IntegralComplexToFloatingComplex:
1158 case CK_DerivedToBaseMemberPointer:
1159 case CK_BaseToDerivedMemberPointer:
1160 case CK_MemberPointerToBoolean:
1161 case CK_ReinterpretMemberPointer:
1162 case CK_AnyPointerToBlockPointerCast:
1163 case CK_ARCProduceObject:
1164 case CK_ARCConsumeObject:
1165 case CK_ARCReclaimReturnedObject:
1166 case CK_ARCExtendBlockObject:
1167 case CK_CopyAndAutoreleaseBlockObject:
1168 case CK_IntToOCLSampler:
1169 case CK_FloatingToFixedPoint:
1170 case CK_FixedPointToFloating:
1171 case CK_FixedPointCast:
1172 case CK_FixedPointToBoolean:
1173 case CK_FixedPointToIntegral:
1174 case CK_IntegralToFixedPoint:
1175 case CK_MatrixCast:
1176 case CK_HLSLVectorTruncation:
1177 case CK_HLSLArrayRValue:
1178 case CK_HLSLElementwiseCast:
1179 case CK_HLSLAggregateSplatCast:
1180 llvm_unreachable("unexpected cast lvalue");
1181
1182 case CK_Dependent:
1183 llvm_unreachable("dependent cast kind in IR gen!");
1184
1185 case CK_BuiltinFnToFnPtr:
1186 llvm_unreachable("builtin functions are handled elsewhere");
1187
1188 case CK_Dynamic: {
1189 LValue lv = emitLValue(e->getSubExpr());
1190 Address v = lv.getAddress();
1191 const auto *dce = cast<CXXDynamicCastExpr>(e);
1193 }
1194
1195 // These are never l-values; just use the aggregate emission code.
1196 case CK_NonAtomicToAtomic:
1197 case CK_AtomicToNonAtomic:
1198 case CK_ToUnion:
1199 case CK_BaseToDerived:
1200 case CK_AddressSpaceConversion:
1201 case CK_ObjCObjectLValueCast:
1202 case CK_VectorSplat:
1203 case CK_ConstructorConversion:
1204 case CK_UserDefinedConversion:
1205 case CK_CPointerToObjCPointerCast:
1206 case CK_BlockPointerToObjCPointerCast:
1207 case CK_LValueToRValue: {
1208 cgm.errorNYI(e->getSourceRange(),
1209 std::string("emitCastLValue for unhandled cast kind: ") +
1210 e->getCastKindName());
1211
1212 return {};
1213 }
1214
1215 case CK_LValueBitCast: {
1216 // This must be a reinterpret_cast (or c-style equivalent).
1217 const auto *ce = cast<ExplicitCastExpr>(e);
1218
1219 cgm.emitExplicitCastExprType(ce, this);
1220 LValue LV = emitLValue(e->getSubExpr());
1222 builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
1223
1224 return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
1225 }
1226
1227 case CK_NoOp: {
1228 // CK_NoOp can model a qualification conversion, which can remove an array
1229 // bound and change the IR type.
1230 LValue lv = emitLValue(e->getSubExpr());
1231 // Propagate the volatile qualifier to LValue, if it exists in e.
1233 cgm.errorNYI(e->getSourceRange(),
1234 "emitCastLValue: NoOp changes volatile qual");
1235 if (lv.isSimple()) {
1236 Address v = lv.getAddress();
1237 if (v.isValid()) {
1238 mlir::Type ty = convertTypeForMem(e->getType());
1239 if (v.getElementType() != ty)
1240 cgm.errorNYI(e->getSourceRange(),
1241 "emitCastLValue: NoOp needs bitcast");
1242 }
1243 }
1244 return lv;
1245 }
1246
1247 case CK_UncheckedDerivedToBase:
1248 case CK_DerivedToBase: {
1249 auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();
1250
1251 LValue lv = emitLValue(e->getSubExpr());
1252 Address thisAddr = lv.getAddress();
1253
1254 // Perform the derived-to-base conversion
1255 Address baseAddr =
1256 getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
1257 /*NullCheckValue=*/false, e->getExprLoc());
1258
1259 // TODO: Support accesses to members of base classes in TBAA. For now, we
1260 // conservatively pretend that the complete object is of the base class
1261 // type.
1263 return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
1264 }
1265
1266 case CK_ZeroToOCLOpaqueType:
1267 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
1268 }
1269
1270 llvm_unreachable("Invalid cast kind");
1271}
1272
1273static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf,
1274 const MemberExpr *me) {
1275 if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
1276 // Try to emit static variable member expressions as DREs.
1277 return DeclRefExpr::Create(
1279 /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
1280 me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
1281 }
1282 return nullptr;
1283}
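// Editor's note (illustrative example; not part of the upstream source):
// for
//   struct S { static int counter; };
//   int f(S &s) { return s.counter; }
// the member expression `s.counter` names a static data member (a VarDecl),
// so the helper above rewrites it as a DeclRefExpr to `S::counter`, which is
// then emitted through emitDeclRefLValue in emitMemberExpr below.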
1284
1285LValue CIRGenFunction::emitMemberExpr(const MemberExpr *e) {
1286 if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
1288 return emitDeclRefLValue(dre);
1289 }
1290
1291 Expr *baseExpr = e->getBase();
1292 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
1293 LValue baseLV;
1294 if (e->isArrow()) {
1295 LValueBaseInfo baseInfo;
1297 Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
1298 QualType ptrTy = baseExpr->getType()->getPointeeType();
1300 baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
1301 } else {
1303 baseLV = emitLValue(baseExpr);
1304 }
1305
1306 const NamedDecl *nd = e->getMemberDecl();
1307 if (auto *field = dyn_cast<FieldDecl>(nd)) {
1308 LValue lv = emitLValueForField(baseLV, field);
1310 if (getLangOpts().OpenMP) {
1311 // If the member was explicitly marked as nontemporal, mark it as
1312 // nontemporal. If the base lvalue is marked as nontemporal, mark access
1313 // to children as nontemporal too.
1314 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
1315 }
1316 return lv;
1317 }
1318
1319 if (isa<FunctionDecl>(nd)) {
1320 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: FunctionDecl");
1321 return LValue();
1322 }
1323
1324 llvm_unreachable("Unhandled member declaration!");
1325}
1326
1327/// Evaluate an expression into a given memory location.
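// Editor's note (illustrative; not part of the upstream source): the three
// evaluation kinds handled below correspond to, e.g., an `int` initializer
// (scalar), a `_Complex double` initializer (complex), and a struct or
// array initializer (aggregate); each is stored into `location` by the
// matching emitter.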
1328void CIRGenFunction::emitAnyExprToMem(const Expr *e, Address location,
1329 Qualifiers quals, bool isInit) {
1330 // FIXME: This function should take an LValue as an argument.
1331 switch (getEvaluationKind(e->getType())) {
1332 case cir::TEK_Complex: {
1333 LValue lv = makeAddrLValue(location, e->getType());
1334 emitComplexExprIntoLValue(e, lv, isInit);
1335 return;
1336 }
1337
1338 case cir::TEK_Aggregate: {
1339 emitAggExpr(e, AggValueSlot::forAddr(location, quals,
1343 return;
1344 }
1345
1346 case cir::TEK_Scalar: {
1348 LValue lv = makeAddrLValue(location, e->getType());
1349 emitStoreThroughLValue(rv, lv);
1350 return;
1351 }
1352 }
1353
1354 llvm_unreachable("bad evaluation kind");
1355}
1356
1357static Address createReferenceTemporary(CIRGenFunction &cgf,
1358 const MaterializeTemporaryExpr *m,
1359 const Expr *inner) {
1360 // TODO(cir): cgf.getTargetHooks();
1361 switch (m->getStorageDuration()) {
1362 case SD_FullExpression:
1363 case SD_Automatic: {
1364 QualType ty = inner->getType();
1365
1367
1368 // The temporary memory should be created in the same scope as the extending
1369 // declaration of the temporary materialization expression.
1370 cir::AllocaOp extDeclAlloca;
1371 if (const ValueDecl *extDecl = m->getExtendingDecl()) {
1372 auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
1373 if (extDeclAddrIter != cgf.localDeclMap.end())
1374 extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
1375 }
1376 mlir::OpBuilder::InsertPoint ip;
1377 if (extDeclAlloca)
1378 ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
1379 return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
1380 cgf.getCounterRefTmpAsString(), /*alloca=*/nullptr,
1381 ip);
1382 }
1383 case SD_Thread:
1384 case SD_Static: {
1385 cgf.cgm.errorNYI(
1386 m->getSourceRange(),
1387 "createReferenceTemporary: static/thread storage duration");
1388 return Address::invalid();
1389 }
1390
1391 case SD_Dynamic:
1392 llvm_unreachable("temporary can't have dynamic storage duration");
1393 }
1394 llvm_unreachable("unknown storage duration");
1395}
1396
1397static void pushTemporaryCleanup(CIRGenFunction &cgf,
1398 const MaterializeTemporaryExpr *m,
1399 const Expr *e, Address referenceTemporary) {
1400 // Objective-C++ ARC:
1401 // If we are binding a reference to a temporary that has ownership, we
1402 // need to perform retain/release operations on the temporary.
1403 //
1404 // FIXME(ogcg): This should be looking at e, not m.
1405 if (m->getType().getObjCLifetime()) {
1406 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
1407 return;
1408 }
1409
1411 if (dk == QualType::DK_none)
1412 return;
1413
1414 switch (m->getStorageDuration()) {
1415 case SD_Static:
1416 case SD_Thread: {
1417 CXXDestructorDecl *referenceTemporaryDtor = nullptr;
1418 if (const auto *classDecl =
1420 classDecl && !classDecl->hasTrivialDestructor())
1421 // Get the destructor for the reference temporary.
1422 referenceTemporaryDtor = classDecl->getDestructor();
1423
1424 if (!referenceTemporaryDtor)
1425 return;
1426
1427 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
1428 "storage duration with destructors");
1429 break;
1430 }
1431
1432 case SD_FullExpression:
1433 cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
1435 break;
1436
1437 case SD_Automatic:
1438 cgf.cgm.errorNYI(e->getSourceRange(),
1439 "pushTemporaryCleanup: automatic storage duration");
1440 break;
1441
1442 case SD_Dynamic:
1443 llvm_unreachable("temporary cannot have dynamic storage duration");
1444 }
1445}
1446
1447LValue CIRGenFunction::emitMaterializeTemporaryExpr(
1448 const MaterializeTemporaryExpr *m) {
1449 const Expr *e = m->getSubExpr();
1450
1451 assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
1452 !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
1453 "Reference should never be pseudo-strong!");
1454
1455 // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so
1456 // as that will cause the lifetime adjustment to be lost for ARC
1457 auto ownership = m->getType().getObjCLifetime();
1458 if (ownership != Qualifiers::OCL_None &&
1459 ownership != Qualifiers::OCL_ExplicitNone) {
1460 cgm.errorNYI(e->getSourceRange(),
1461 "emitMaterializeTemporaryExpr: ObjCLifetime");
1462 return {};
1463 }
1464
1467 e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);
1468
1469 for (const Expr *ignored : commaLHSs)
1470 emitIgnoredExpr(ignored);
1471
1472 if (isa<OpaqueValueExpr>(e)) {
1473 cgm.errorNYI(e->getSourceRange(),
1474 "emitMaterializeTemporaryExpr: OpaqueValueExpr");
1475 return {};
1476 }
1477
1478 // Create and initialize the reference temporary.
1479 Address object = createReferenceTemporary(*this, m, e);
1480
1481 if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
1482 // TODO(cir): add something akin to stripPointerCasts() to ptr above
1483 cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
1484 return {};
1485 } else {
1487 emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
1488 }
1489 pushTemporaryCleanup(*this, m, e, object);
1490
1491 // Perform derived-to-base casts and/or field accesses, to get from the
1492 // temporary object we created (and, potentially, for which we extended
1493 // the lifetime) to the subobject we're binding the reference to.
1494 if (!adjustments.empty()) {
1495 cgm.errorNYI(e->getSourceRange(),
1496 "emitMaterializeTemporaryExpr: Adjustments");
1497 return {};
1498 }
1499
1500 return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
1501}
1502
1503LValue
1504CIRGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
1506
1507 auto it = opaqueLValues.find(e);
1508 if (it != opaqueLValues.end())
1509 return it->second;
1510
1511 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
1512 return emitLValue(e->getSourceExpr());
1513}
1514
1515RValue
1516CIRGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
1518
1519 auto it = opaqueRValues.find(e);
1520 if (it != opaqueRValues.end())
1521 return it->second;
1522
1523 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
1524 return emitAnyExpr(e->getSourceExpr());
1525}
1526
1527LValue CIRGenFunction::emitCompoundLiteralLValue(const CompoundLiteralExpr *e) {
1528 if (e->isFileScope()) {
1529 cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
1530 return {};
1531 }
1532
1533 if (e->getType()->isVariablyModifiedType()) {
1534 cgm.errorNYI(e->getSourceRange(),
1535 "emitCompoundLiteralLValue: VariablyModifiedType");
1536 return {};
1537 }
1538
1539 Address declPtr = createMemTemp(e->getType(), getLoc(e->getSourceRange()),
1540 ".compoundliteral");
1541 const Expr *initExpr = e->getInitializer();
1542 LValue result = makeAddrLValue(declPtr, e->getType(), AlignmentSource::Decl);
1543
1544 emitAnyExprToMem(initExpr, declPtr, e->getType().getQualifiers(),
1545 /*Init*/ true);
1546
1547 // Block-scope compound literals are destroyed at the end of the enclosing
1548 // scope in C.
1549 if (!getLangOpts().CPlusPlus && e->getType().isDestructedType()) {
1550 cgm.errorNYI(e->getSourceRange(),
1551 "emitCompoundLiteralLValue: non C++ DestructedType");
1552 return {};
1553 }
1554
1555 return result;
1556}
1557
1558LValue CIRGenFunction::emitCallExprLValue(const CallExpr *e) {
1559 RValue rv = emitCallExpr(e);
1560
1561 if (!rv.isScalar()) {
1562 cgm.errorNYI(e->getSourceRange(), "emitCallExprLValue: non-scalar return");
1563 return {};
1564 }
1565
1566 assert(e->getCallReturnType(getContext())->isReferenceType() &&
1567 "Can't have a scalar return unless the return type is a "
1568 "reference type!");
1569
1571}
1572
1573LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
1574 // Comma expressions just emit their LHS then their RHS as an l-value.
1575 if (e->getOpcode() == BO_Comma) {
1576 emitIgnoredExpr(e->getLHS());
1577 return emitLValue(e->getRHS());
1578 }
1579
1580 if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) {
1581 cgm.errorNYI(e->getSourceRange(), "member pointers");
1582 return {};
1583 }
1584
1585 assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");
1586
1587 // Note that in all of these cases, __block variables need the RHS
1588 // evaluated first just in case the variable gets moved by the RHS.
1589
1591 case cir::TEK_Scalar: {
1593 if (e->getLHS()->getType().getObjCLifetime() !=
1594         Qualifiers::OCL_None) {
1595 cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
1596 return {};
1597 }
1598
1599 RValue rv = emitAnyExpr(e->getRHS());
1600 LValue lv = emitLValue(e->getLHS());
1601
1602 SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
1603 if (lv.isBitField())
1604      emitStoreThroughBitfieldLValue(rv, lv);
1605 else
1606 emitStoreThroughLValue(rv, lv);
1607
1608 if (getLangOpts().OpenMP) {
1609 cgm.errorNYI(e->getSourceRange(), "openmp");
1610 return {};
1611 }
1612
1613 return lv;
1614 }
1615
1616 case cir::TEK_Complex: {
1618 }
1619
1620 case cir::TEK_Aggregate:
1621 cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
1622 return {};
1623 }
1624 llvm_unreachable("bad evaluation kind");
1625}
1626
1627/// Emit code to compute the specified expression which
1628/// can have any type. The result is returned as an RValue struct.
1629RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot,
1630 bool ignoreResult) {
1632 case cir::TEK_Scalar:
1633 return RValue::get(emitScalarExpr(e));
1634 case cir::TEK_Complex:
1636 case cir::TEK_Aggregate: {
1637 if (!ignoreResult && aggSlot.isIgnored())
1638 aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
1640 emitAggExpr(e, aggSlot);
1641 return aggSlot.asRValue();
1642 }
1643 }
1644 llvm_unreachable("bad evaluation kind");
1645}
1646
1647// Detect the unusual situation where an inline version is shadowed by a
1648// non-inline version. In that case we should pick the external one
1649// everywhere. That's GCC behavior too.
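// Editor's note (illustrative example; not part of the upstream source):
// the shadowing pattern being detected looks like
//   extern inline __attribute__((always_inline, gnu_inline))
//   void *memcpy(void *d, const void *s, unsigned long n) { /* ... */ }
// declared in a header, with a separate non-inline memcpy provided
// elsewhere; the ".inline" clone is used only while every prior declaration
// is such an inline builtin.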
1650static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd) {
1651 for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
1652 if (!pd->isInlineBuiltinDeclaration())
1653 return false;
1654 return true;
1655}
1656
1657CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
1658 const auto *fd = cast<FunctionDecl>(gd.getDecl());
1659
1660 if (unsigned builtinID = fd->getBuiltinID()) {
1661 if (fd->getAttr<AsmLabelAttr>()) {
1662 cgm.errorNYI("AsmLabelAttr");
1663 }
1664
1665 StringRef ident = fd->getName();
1666 std::string fdInlineName = (ident + ".inline").str();
1667
1668 bool isPredefinedLibFunction =
1669 cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
1670 // Assume nobuiltins everywhere until we actually read the attributes.
1671 bool hasAttributeNoBuiltin = true;
1673
1674 // When directly calling an inline builtin, call it through its mangled
1675 // name to make it clear it's not the actual builtin.
1676 auto fn = cast<cir::FuncOp>(curFn);
1677 if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
1678 cir::FuncOp clone =
1679 mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
1680
1681 if (!clone) {
1682 // Create a forward declaration - the body will be generated in
1683 // generateCode when the function definition is processed
1684 cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
1685 mlir::OpBuilder::InsertionGuard guard(builder);
1686 builder.setInsertionPointToStart(cgm.getModule().getBody());
1687
1688 clone = cir::FuncOp::create(builder, calleeFunc.getLoc(), fdInlineName,
1689 calleeFunc.getFunctionType());
1690 clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
1691 &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
1692 clone.setSymVisibility("private");
1693 clone.setInlineKindAttr(cir::InlineAttr::get(
1694 &cgm.getMLIRContext(), cir::InlineKind::AlwaysInline));
1695 }
1696 return CIRGenCallee::forDirect(clone, gd);
1697 }
1698
1699 // Replaceable builtins provide their own implementation of a builtin. If we
1700 // are in an inline builtin implementation, avoid trivial infinite
1701 // recursion. Honor __attribute__((no_builtin("foo"))) or
1702    // __attribute__((no_builtin)) on the current function, unless foo is not
1703    // a predefined library function, in which case we must generate the
1704    // builtin no matter what.
1705 else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
1706 return CIRGenCallee::forBuiltin(builtinID, fd);
1707 }
1708
1709 cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);
1710
1711 assert(!cir::MissingFeatures::hip());
1712
1713 return CIRGenCallee::forDirect(callee, gd);
1714}
1715
1716RValue CIRGenFunction::getUndefRValue(QualType ty) {
1717 if (ty->isVoidType())
1718 return RValue::get(nullptr);
1719
1720 cgm.errorNYI("unsupported type for undef rvalue");
1721 return RValue::get(nullptr);
1722}
1723
1724RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
1725 const CIRGenCallee &origCallee,
1726 const clang::CallExpr *e,
1727 ReturnValueSlot returnValue) {
1728 // Get the actual function type. The callee type will always be a pointer to
1729 // function type or a block pointer type.
1730 assert(calleeTy->isFunctionPointerType() &&
1731 "Callee must have function pointer type!");
1732
1733 calleeTy = getContext().getCanonicalType(calleeTy);
1734 auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
1735
1736 CIRGenCallee callee = origCallee;
1737
1738 if (getLangOpts().CPlusPlus)
1740
1741 const auto *fnType = cast<FunctionType>(pointeeTy);
1742
1744
1745 CallArgList args;
1747
1748 emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
1749 e->getDirectCallee());
1750
1751 const CIRGenFunctionInfo &funcInfo =
1752 cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
1753
1754 // C99 6.5.2.2p6:
1755 // If the expression that denotes the called function has a type that does
1756 // not include a prototype, [the default argument promotions are performed].
1757 // If the number of arguments does not equal the number of parameters, the
1758 // behavior is undefined. If the function is defined with a type that
1759 // includes a prototype, and either the prototype ends with an ellipsis (,
1760 // ...) or the types of the arguments after promotion are not compatible
1761 // with the types of the parameters, the behavior is undefined. If the
1762 // function is defined with a type that does not include a prototype, and
1763 // the types of the arguments after promotion are not compatible with those
1764 // of the parameters after promotion, the behavior is undefined [except in
1765 // some trivial cases].
1766 // That is, in the general case, we should assume that a call through an
1767 // unprototyped function type works like a *non-variadic* call. The way we
1768 // make this work is to cast to the exact type of the promoted arguments.
1769 if (isa<FunctionNoProtoType>(fnType)) {
1772 cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
1773 // get non-variadic function type
1774 calleeTy = cir::FuncType::get(calleeTy.getInputs(),
1775 calleeTy.getReturnType(), false);
1776 auto calleePtrTy = cir::PointerType::get(calleeTy);
1777
1778 mlir::Operation *fn = callee.getFunctionPointer();
1779 mlir::Value addr;
1780 if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
1781 addr = cir::GetGlobalOp::create(
1782 builder, getLoc(e->getSourceRange()),
1783 cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
1784 } else {
1785 addr = fn->getResult(0);
1786 }
1787
1788 fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
1789 callee.setFunctionPointer(fn);
1790 }
1791
1793 assert(!cir::MissingFeatures::hip());
1795
1796 cir::CIRCallOpInterface callOp;
1797 RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
1798 getLoc(e->getExprLoc()));
1799
1801
1802 return callResult;
1803}
1804
1806 e = e->IgnoreParens();
1807
1808 // Look through function-to-pointer decay.
1809 if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
1810 if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
1811 implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
1812 return emitCallee(implicitCast->getSubExpr());
1813 }
1814 // When performing an indirect call through a function pointer lvalue, the
1815 // function pointer lvalue is implicitly converted to an rvalue through an
1816 // lvalue-to-rvalue conversion.
1817 assert(implicitCast->getCastKind() == CK_LValueToRValue &&
1818 "unexpected implicit cast on function pointers");
1819 } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
1820 // Resolve direct calls.
1821 const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
1822 return emitDirectCallee(funcDecl);
1823 } else if (auto me = dyn_cast<MemberExpr>(e)) {
1824 if (const auto *fd = dyn_cast<FunctionDecl>(me->getMemberDecl())) {
1825 emitIgnoredExpr(me->getBase());
1826 return emitDirectCallee(fd);
1827 }
1828 // Else fall through to the indirect reference handling below.
1829 } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
1831 }
1832
1833 // Otherwise, we have an indirect reference.
1834 mlir::Value calleePtr;
1836 if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
1837 calleePtr = emitScalarExpr(e);
1838 functionType = ptrType->getPointeeType();
1839 } else {
1840 functionType = e->getType();
1841 calleePtr = emitLValue(e).getPointer();
1842 }
1843 assert(functionType->isFunctionType());
1844
1845 GlobalDecl gd;
1846 if (const auto *vd =
1847 dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
1848 gd = GlobalDecl(vd);
1849
1850 CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
1851 CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
1852 return callee;
1853}
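// --- Illustrative example (not part of CIRGenExpr.cpp) ---
// A small sketch of the two callee shapes handled above: a direct call whose
// callee is a function-to-pointer decay of a DeclRefExpr naming a
// FunctionDecl, and an indirect call through a function-pointer value, which
// takes the "indirect reference" path at the end of emitCallee.

int add(int a, int b) { return a + b; }

int callDirect(int x, int y) {
  return add(x, y); // DeclRefExpr -> FunctionDecl: emitDirectCallee
}

int callIndirect(int (*fn)(int, int), int x, int y) {
  return fn(x, y); // pointer value emitted as a scalar, then called indirectly
}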
1854
1858
1859 if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
1861
1862 if (isa<CUDAKernelCallExpr>(e)) {
1863 cgm.errorNYI(e->getSourceRange(), "call to CUDA kernel");
1864 return RValue::get(nullptr);
1865 }
1866
1867 if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
1868 // If the callee decl is a CXXMethodDecl, we need to emit this as a C++
1869 // operator member call.
1870 if (const CXXMethodDecl *md =
1871 dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
1872 return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
1873 // A CXXOperatorCallExpr is created even for explicit object methods, but
1874 // these should be treated like static function calls. Fall through to do
1875 // that.
1876 }
1877
1878 CIRGenCallee callee = emitCallee(e->getCallee());
1879
1880 if (callee.isBuiltin())
1881 return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
1882 returnValue);
1883
1884 if (callee.isPseudoDestructor())
1886
1887 return emitCall(e->getCallee()->getType(), callee, e, returnValue);
1888}
1889
1890/// Emit code to compute the specified expression, ignoring the result.
1892 if (e->isPRValue()) {
1893 emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
1894 return;
1895 }
1896
1897 // Just emit it as an l-value and drop the result.
1898 emitLValue(e);
1899}
1900
1902 LValueBaseInfo *baseInfo) {
1904 assert(e->getType()->isArrayType() &&
1905 "Array to pointer decay must have array source type!");
1906
1907 // Expressions of array type can't be bitfields or vector elements.
1908 LValue lv = emitLValue(e);
1909 Address addr = lv.getAddress();
1910
1911 // If the array type was an incomplete type, we need to make sure
1912 // the decay ends up being the right type.
1913 auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
1914
1915 if (e->getType()->isVariableArrayType())
1916 return addr;
1917
1918 [[maybe_unused]] auto pointeeTy =
1919 mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
1920
1921 [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
1922 assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
1923 assert(pointeeTy == arrayTy);
1924
1925 // The result of this decay conversion points to an array element within the
1926 // base lvalue. However, since TBAA currently does not support representing
1927 // accesses to elements of member arrays, we conservatively represent accesses
1928 // to the pointee object as if it had no base lvalue specified.
1929 // TODO: Support TBAA for member arrays.
1932
1933 mlir::Value ptr = builder.maybeBuildArrayDecay(
1934 cgm.getLoc(e->getSourceRange()), addr.getPointer(),
1935 convertTypeForMem(eltType));
1936 return Address(ptr, addr.getAlignment());
1937}
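// --- Illustrative example (not part of CIRGenExpr.cpp) ---
// A short sketch of the decay handled above: the array lvalue is emitted and
// then decayed to a pointer to its first element; for a C variable-length
// array the address is returned as-is (the early return above).

int sumFirstTwo() {
  int vals[8] = {1, 2};
  int *p = vals; // array-to-pointer decay: &vals[0], element type 'int'
  return p[0] + p[1];
}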
1938
1939/// Given the address of a temporary variable, produce an r-value of its type.
1943 switch (getEvaluationKind(type)) {
1944 case cir::TEK_Complex:
1945 return RValue::getComplex(emitLoadOfComplex(lvalue, loc));
1946 case cir::TEK_Aggregate:
1947 cgm.errorNYI(loc, "convertTempToRValue: aggregate type");
1948 return RValue::get(nullptr);
1949 case cir::TEK_Scalar:
1950 return RValue::get(emitLoadOfScalar(lvalue, loc));
1951 }
1952 llvm_unreachable("bad evaluation kind");
1953}
1954
1955/// Emit an `if` on a boolean condition, filling `then` and `else` into
1956 /// the appropriate regions.
1957mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
1958 const Stmt *thenS,
1959 const Stmt *elseS) {
1960 mlir::Location thenLoc = getLoc(thenS->getSourceRange());
1961 std::optional<mlir::Location> elseLoc;
1962 if (elseS)
1963 elseLoc = getLoc(elseS->getSourceRange());
1964
1965 mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
1967 cond, /*thenBuilder=*/
1968 [&](mlir::OpBuilder &, mlir::Location) {
1969 LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
1970 resThen = emitStmt(thenS, /*useCurrentScope=*/true);
1971 },
1972 thenLoc,
1973 /*elseBuilder=*/
1974 [&](mlir::OpBuilder &, mlir::Location) {
1975 assert(elseLoc && "Invalid location for elseS.");
1976 LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
1977 resElse = emitStmt(elseS, /*useCurrentScope=*/true);
1978 },
1979 elseLoc);
1980
1981 return mlir::LogicalResult::success(resThen.succeeded() &&
1982 resElse.succeeded());
1983}
1984
1985/// Emit an `if` on a boolean condition, filling `then` and `else` into
1986 /// the appropriate regions.
1988 const clang::Expr *cond, BuilderCallbackRef thenBuilder,
1989 mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
1990 std::optional<mlir::Location> elseLoc) {
1991 // Attempt to be as accurate as possible with the IfOp location: generate
1992 // one fused location that has either 2 or 4 total locations, depending
1993 // on else's availability.
1994 SmallVector<mlir::Location, 2> ifLocs{thenLoc};
1995 if (elseLoc)
1996 ifLocs.push_back(*elseLoc);
1997 mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);
1998
1999 // Emit the code with the fully general case.
2000 mlir::Value condV = emitOpOnBoolExpr(loc, cond);
2001 return cir::IfOp::create(builder, loc, condV, elseLoc.has_value(),
2002 /*thenBuilder=*/thenBuilder,
2003 /*elseBuilder=*/elseBuilder);
2004}
2005
2006/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
2007mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
2008 const Expr *cond) {
2011 cond = cond->IgnoreParens();
2012
2013 // In LLVM the condition is reversed here for efficient codegen.
2014 // This should be done in CIR prior to LLVM lowering; if we do it now,
2015 // we can make CIR-based diagnostics misleading.
2016 // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
2018
2019 if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
2020 Expr *trueExpr = condOp->getTrueExpr();
2021 Expr *falseExpr = condOp->getFalseExpr();
2022 mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());
2023
2024 mlir::Value ternaryOpRes =
2025 cir::TernaryOp::create(
2026 builder, loc, condV, /*thenBuilder=*/
2027 [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
2028 mlir::Value lhs = emitScalarExpr(trueExpr);
2029 cir::YieldOp::create(b, loc, lhs);
2030 },
2031 /*elseBuilder=*/
2032 [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
2033 mlir::Value rhs = emitScalarExpr(falseExpr);
2034 cir::YieldOp::create(b, loc, rhs);
2035 })
2036 .getResult();
2037
2038 return emitScalarConversion(ternaryOpRes, condOp->getType(),
2039 getContext().BoolTy, condOp->getExprLoc());
2040 }
2041
2042 if (isa<CXXThrowExpr>(cond)) {
2043 cgm.errorNYI("NYI");
2044 return createDummyValue(loc, cond->getType());
2045 }
2046
2047 // If the branch has a condition wrapped by __builtin_unpredictable,
2048 // create metadata that specifies that the branch is unpredictable.
2049 // Don't bother if not optimizing because that metadata would not be used.
2051
2052 // Emit the code with the fully general case.
2053 return evaluateExprAsBool(cond);
2054}
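// --- Illustrative example (not part of CIRGenExpr.cpp) ---
// A sketch of a ?: used directly as a boolean condition, the case
// special-cased above: each arm is emitted as a scalar inside a cir.ternary,
// and the ternary result is then converted to the boolean used by the branch.

bool pick(bool c, bool x, bool y) {
  if (c ? x : y) // ConditionalOperator condition handled by emitOpOnBoolExpr
    return true;
  return false;
}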
2055
2056mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2057 mlir::Location loc, CharUnits alignment,
2058 bool insertIntoFnEntryBlock,
2059 mlir::Value arraySize) {
2060 mlir::Block *entryBlock = insertIntoFnEntryBlock
2062 : curLexScope->getEntryBlock();
2063
2064 // If this is an alloca in the entry basic block of a cir.try and there's
2065 // a surrounding cir.scope, make sure the alloca ends up in the surrounding
2066 // scope instead. This is necessary in order to guarantee all SSA values are
2067 // reachable during cleanups.
2068 if (auto tryOp =
2069 llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
2070 if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
2071 entryBlock = &scopeOp.getScopeRegion().front();
2072 }
2073
2074 return emitAlloca(name, ty, loc, alignment,
2075 builder.getBestAllocaInsertPoint(entryBlock), arraySize);
2076}
2077
2078mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2079 mlir::Location loc, CharUnits alignment,
2080 mlir::OpBuilder::InsertPoint ip,
2081 mlir::Value arraySize) {
2082 // CIR uses its own alloca address space rather than follow the target data
2083 // layout like original CodeGen. The data layout awareness should be done in
2084 // the lowering pass instead.
2085 cir::PointerType localVarPtrTy =
2087 mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);
2088
2089 mlir::Value addr;
2090 {
2091 mlir::OpBuilder::InsertionGuard guard(builder);
2092 builder.restoreInsertionPoint(ip);
2093 addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
2094 /*var type*/ ty, name, alignIntAttr, arraySize);
2096 }
2097 return addr;
2098}
2099
2100 // Note: this function also emits constructor calls to support an MSVC
2101 // extension allowing explicit constructor function calls.
2104 const Expr *callee = ce->getCallee()->IgnoreParens();
2105
2106 if (isa<BinaryOperator>(callee)) {
2107 cgm.errorNYI(ce->getSourceRange(),
2108 "emitCXXMemberCallExpr: C++ binary operator");
2109 return RValue::get(nullptr);
2110 }
2111
2112 const auto *me = cast<MemberExpr>(callee);
2113 const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());
2114
2115 if (md->isStatic()) {
2116 cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
2117 return RValue::get(nullptr);
2118 }
2119
2120 bool hasQualifier = me->hasQualifier();
2121 NestedNameSpecifier qualifier = me->getQualifier();
2122 bool isArrow = me->isArrow();
2123 const Expr *base = me->getBase();
2124
2126 ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
2127}
2128
2130 AggValueSlot dest) {
2131 assert(!dest.isIgnored() && "Must have a destination!");
2132 const CXXConstructorDecl *cd = e->getConstructor();
2133
2134 // If we require zero initialization before (or instead of) calling the
2135 // constructor, as can be the case with a non-user-provided default
2136 // constructor, emit the zero initialization now, unless destination is
2137 // already zeroed.
2138 if (e->requiresZeroInitialization() && !dest.isZeroed()) {
2139 switch (e->getConstructionKind()) {
2143 e->getType());
2144 break;
2147 cgm.errorNYI(e->getSourceRange(),
2148 "emitCXXConstructExpr: base requires initialization");
2149 break;
2150 }
2151 }
2152
2153 // If this is a call to a trivial default constructor, do nothing.
2154 if (cd->isTrivial() && cd->isDefaultConstructor())
2155 return;
2156
2157 // Elide the constructor if we're constructing from a temporary
2158 if (getLangOpts().ElideConstructors && e->isElidable()) {
2159 // FIXME: This only handles the simplest case, where the source object is
2160 // passed directly as the first argument to the constructor. This
2161 // should also handle stepping through implicit casts and conversion
2162 // sequences which involve two steps, with a conversion operator
2163 // followed by a converting constructor.
2164 const Expr *srcObj = e->getArg(0);
2165 assert(srcObj->isTemporaryObject(getContext(), cd->getParent()));
2166 assert(
2167 getContext().hasSameUnqualifiedType(e->getType(), srcObj->getType()));
2168 emitAggExpr(srcObj, dest);
2169 return;
2170 }
2171
2172 if (const ArrayType *arrayType = getContext().getAsArrayType(e->getType())) {
2174 emitCXXAggrConstructorCall(cd, arrayType, dest.getAddress(), e, false);
2175 } else {
2176
2178 bool forVirtualBase = false;
2179 bool delegating = false;
2180
2181 switch (e->getConstructionKind()) {
2184 break;
2186 // We should be emitting a constructor; GlobalDecl will assert this
2187 type = curGD.getCtorType();
2188 delegating = true;
2189 break;
2191 forVirtualBase = true;
2192 [[fallthrough]];
2194 type = Ctor_Base;
2195 break;
2196 }
2197
2198 emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
2199 }
2200}
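// --- Illustrative example (not part of CIRGenExpr.cpp) ---
// A sketch of two construction kinds distinguished above: a trivial default
// constructor, for which no call is emitted, and a user-provided constructor
// emitted as a complete-object constructor call.

struct Trivial { int x; };       // trivial default constructor
struct Tracked { Tracked() {} }; // user-provided constructor

void constructBoth() {
  Trivial t; // trivial default constructor: no call emitted for it
  Tracked k; // emitted via emitCXXConstructorCall as a complete-object ctor
  (void)t;
  (void)k;
}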
2201
2203 // Emit the expression as an lvalue.
2204 LValue lv = emitLValue(e);
2205 assert(lv.isSimple());
2206 mlir::Value value = lv.getPointer();
2207
2209
2210 return RValue::get(value);
2211}
2212
2214 LValueBaseInfo *pointeeBaseInfo) {
2215 if (refLVal.isVolatile())
2216 cgm.errorNYI(loc, "load of volatile reference");
2217
2218 cir::LoadOp load =
2219 cir::LoadOp::create(builder, loc, refLVal.getAddress().getElementType(),
2220 refLVal.getAddress().getPointer());
2221
2223
2224 QualType pointeeType = refLVal.getType()->getPointeeType();
2225 CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
2226 return Address(load, convertTypeForMem(pointeeType), align);
2227}
2228
2230 mlir::Location loc,
2231 QualType refTy,
2232 AlignmentSource source) {
2233 LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
2234 LValueBaseInfo pointeeBaseInfo;
2236 Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
2237 return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
2238 pointeeBaseInfo);
2239}
2240
2241void CIRGenFunction::emitTrap(mlir::Location loc, bool createNewBlock) {
2242 cir::TrapOp::create(builder, loc);
2243 if (createNewBlock)
2244 builder.createBlock(builder.getBlock()->getParent());
2245}
2246
2248 bool createNewBlock) {
2250 cir::UnreachableOp::create(builder, getLoc(loc));
2251 if (createNewBlock)
2252 builder.createBlock(builder.getBlock()->getParent());
2253}
2254
2255mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
2256 clang::QualType qt) {
2257 mlir::Type t = convertType(qt);
2258 CharUnits alignment = getContext().getTypeAlignInChars(qt);
2259 return builder.createDummyValue(loc, t, alignment);
2260}
2261
2262//===----------------------------------------------------------------------===//
2263// CIR builder helpers
2264//===----------------------------------------------------------------------===//
2265
2267 const Twine &name, Address *alloca,
2268 mlir::OpBuilder::InsertPoint ip) {
2269 // FIXME: Should we prefer the preferred type alignment here?
2270 return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
2271 alloca, ip);
2272}
2273
2275 mlir::Location loc, const Twine &name,
2276 Address *alloca,
2277 mlir::OpBuilder::InsertPoint ip) {
2278 Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
2279 /*ArraySize=*/nullptr, alloca, ip);
2280 if (ty->isConstantMatrixType()) {
2282 cgm.errorNYI(loc, "temporary matrix value");
2283 }
2284 return result;
2285}
2286
2287 /// This creates an alloca and inserts it into the entry block of the
2288/// current region.
2290 mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
2291 mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
2292 cir::AllocaOp alloca = ip.isSet()
2293 ? createTempAlloca(ty, loc, name, ip, arraySize)
2294 : createTempAlloca(ty, loc, name, arraySize);
2295 alloca.setAlignmentAttr(cgm.getSize(align));
2296 return Address(alloca, ty, align);
2297}
2298
2299 /// This creates an alloca and inserts it into the entry block. The alloca is
2300 /// cast to the default address space if necessary.
2302 mlir::Location loc, const Twine &name,
2303 mlir::Value arraySize,
2304 Address *allocaAddr,
2305 mlir::OpBuilder::InsertPoint ip) {
2306 Address alloca =
2307 createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
2308 if (allocaAddr)
2309 *allocaAddr = alloca;
2310 mlir::Value v = alloca.getPointer();
2311 // Alloca always returns a pointer in alloca address space, which may
2312 // be different from the type defined by the language. For example,
2313 // in C++ the auto variables are in the default address space. Therefore
2314 // cast alloca to the default address space when necessary.
2316 return Address(v, ty, align);
2317}
2318
2319/// This creates an alloca and inserts it into the entry block if \p ArraySize
2320/// is nullptr, otherwise inserts it at the current insertion point of the
2321/// builder.
2322cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2323 mlir::Location loc,
2324 const Twine &name,
2325 mlir::Value arraySize,
2326 bool insertIntoFnEntryBlock) {
2327 return mlir::cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
2328 insertIntoFnEntryBlock, arraySize)
2329 .getDefiningOp());
2330}
2331
2332 /// This creates an alloca and inserts it into the provided insertion point.
2333cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2334 mlir::Location loc,
2335 const Twine &name,
2336 mlir::OpBuilder::InsertPoint ip,
2337 mlir::Value arraySize) {
2338 assert(ip.isSet() && "Insertion point is not set");
2339 return mlir::cast<cir::AllocaOp>(
2340 emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
2341 .getDefiningOp());
2342}
2343
2344/// Try to emit a reference to the given value without producing it as
2345/// an l-value. For many cases, this is just an optimization, but it avoids
2346/// us needing to emit global copies of variables if they're named without
2347/// triggering a formal use in a context where we can't emit a direct
2348/// reference to them, for instance if a block or lambda or a member of a
2349/// local class uses a const int variable or constexpr variable from an
2350/// enclosing function.
2351///
2352/// For named members of enums, this is the only way they are emitted.
2355 const ValueDecl *value = refExpr->getDecl();
2356
2357 // There is a lot more to do here, but for now only EnumConstantDecl is
2358 // supported.
2360
2361 // The value needs to be an enum constant or a constant variable.
2362 if (!isa<EnumConstantDecl>(value))
2363 return ConstantEmission();
2364
2365 Expr::EvalResult result;
2366 if (!refExpr->EvaluateAsRValue(result, getContext()))
2367 return ConstantEmission();
2368
2369 QualType resultType = refExpr->getType();
2370
2371 // As long as we're only handling EnumConstantDecl, there should be no
2372 // side-effects.
2373 assert(!result.HasSideEffects);
2374
2375 // Emit as a constant.
2376 // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
2377 // somewhat heavy refactoring...)
2378 mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
2379 refExpr->getLocation(), result.Val, resultType);
2380 mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
2381 assert(cstToEmit && "expected a typed attribute");
2382
2384
2385 return ConstantEmission::forValue(cstToEmit);
2386}
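// --- Illustrative example (not part of CIRGenExpr.cpp) ---
// A sketch of the only case currently folded above: a DeclRefExpr that names
// an EnumConstantDecl, which is evaluated and emitted as a constant instead
// of being materialized as an l-value.

enum Color { Red = 1, Green = 2, Blue = 4 };

int defaultColor() {
  return Red; // reference to an enum constant: emitted as the constant 1
}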
2387
2391 return tryEmitAsConstant(dre);
2392 return ConstantEmission();
2393}
2394
2396 const CIRGenFunction::ConstantEmission &constant, Expr *e) {
2397 assert(constant && "not a constant");
2398 if (constant.isReference()) {
2399 cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
2400 return {};
2401 }
2402 return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
2403}
2404
2406 const StringLiteral *sl = e->getFunctionName();
2407 assert(sl != nullptr && "No StringLiteral name in PredefinedExpr");
2408 auto fn = cast<cir::FuncOp>(curFn);
2409 StringRef fnName = fn.getName();
2410 fnName.consume_front("\01");
2411 std::array<StringRef, 2> nameItems = {
2413 std::string gvName = llvm::join(nameItems, ".");
2414 if (isa_and_nonnull<BlockDecl>(curCodeDecl))
2415 cgm.errorNYI(e->getSourceRange(), "predefined lvalue in block");
2416
2417 return emitStringLiteralLValue(sl, gvName);
2418}
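// --- Illustrative example (not part of CIRGenExpr.cpp) ---
// A sketch of a PredefinedExpr handled above: __func__ is lowered through
// emitStringLiteralLValue into a global string whose name is derived from the
// enclosing function's (unprefixed) symbol name.

const char *whoAmI() {
  return __func__; // PredefinedExpr carrying the StringLiteral "whoAmI"
}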
2419
2424
2425namespace {
2426// Handle the case where the condition is a constant evaluatable simple integer,
2427// which means we don't have to separately handle the true/false blocks.
2428std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
2430 const Expr *condExpr = e->getCond();
2431 llvm::APSInt condExprVal;
2432 if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
2433 return std::nullopt;
2434
2435 const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
2436 if (!condExprVal.getBoolValue())
2437 std::swap(live, dead);
2438
2439 if (cgf.containsLabel(dead))
2440 return std::nullopt;
2441
2442 // If the true case is live, we need to track its region.
2445 // If the live arm is a throw expression, emit it and return an undefined
2446 // lvalue because the result can't be used.
2447 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
2448 cgf.emitCXXThrowExpr(throwExpr);
2449 // Return an undefined lvalue - the throw terminates execution
2450 // so this value will never actually be used
2451 mlir::Type elemTy = cgf.convertType(dead->getType());
2452 mlir::Value undefPtr =
2453 cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
2454 cgf.getLoc(throwExpr->getSourceRange()));
2455 return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
2456 dead->getType());
2457 }
2458 return cgf.emitLValue(live);
2459}
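// --- Illustrative example (not part of CIRGenExpr.cpp) ---
// A sketch of the simple case handled above: the condition folds to a
// constant and the dead arm contains no labels, so only the live arm is
// emitted as an l-value and no cir.ternary is created.

extern int a;
extern int b;

void assignLiveArm() {
  (true ? a : b) = 1; // condition constant-folds; only the 'a' arm is emitted
}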
2460
2461/// Emit the operand of a glvalue conditional operator. This is either a glvalue
2462/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
2463/// LValue is returned and the current block has been terminated.
2464static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
2465 const Expr *operand) {
2466 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
2467 cgf.emitCXXThrowExpr(throwExpr);
2468 return std::nullopt;
2469 }
2470
2471 return cgf.emitLValue(operand);
2472}
2473} // namespace
2474
2475// Create and generate the 3 blocks for a conditional operator.
2476// Leaves the 'current block' in the continuation basic block.
2477template <typename FuncTy>
2480 const FuncTy &branchGenFunc) {
2481 ConditionalInfo info;
2482 ConditionalEvaluation eval(*this);
2483 mlir::Location loc = getLoc(e->getSourceRange());
2484 CIRGenBuilderTy &builder = getBuilder();
2485
2486 mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
2488 mlir::Type yieldTy{};
2489
2490 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
2491 const Expr *expr, std::optional<LValue> &resultLV) {
2492 CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
2493 curLexScope->setAsTernary();
2494
2496 eval.beginEvaluation();
2497 resultLV = branchGenFunc(*this, expr);
2498 mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
2499 eval.endEvaluation();
2500
2501 if (resultPtr) {
2502 yieldTy = resultPtr.getType();
2503 cir::YieldOp::create(b, loc, resultPtr);
2504 } else {
2505 // If the LHS or RHS is a void expression, we need to patch the arms
2506 // so that their yield types match properly.
2507 // If the current block's terminator is an UnreachableOp (from a throw),
2508 // we don't need a yield.
2509 if (builder.getInsertionBlock()->mightHaveTerminator()) {
2510 mlir::Operation *terminator =
2511 builder.getInsertionBlock()->getTerminator();
2512 if (isa_and_nonnull<cir::UnreachableOp>(terminator))
2513 insertPoints.push_back(b.saveInsertionPoint());
2514 }
2515 }
2516 };
2517
2518 info.result = cir::TernaryOp::create(
2519 builder, loc, condV,
2520 /*trueBuilder=*/
2521 [&](mlir::OpBuilder &b, mlir::Location loc) {
2522 emitBranch(b, loc, e->getTrueExpr(), info.lhs);
2523 },
2524 /*falseBuilder=*/
2525 [&](mlir::OpBuilder &b, mlir::Location loc) {
2526 emitBranch(b, loc, e->getFalseExpr(), info.rhs);
2527 })
2528 .getResult();
2529
2530 // If both arms are void, so be it.
2531 if (!yieldTy)
2532 yieldTy = voidTy;
2533
2534 // Insert required yields.
2535 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2536 mlir::OpBuilder::InsertionGuard guard(builder);
2537 builder.restoreInsertionPoint(toInsert);
2538
2539 // Block does not return: build empty yield.
2540 if (!yieldTy) {
2541 cir::YieldOp::create(builder, loc);
2542 } else { // Block returns: set null yield value.
2543 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2544 cir::YieldOp::create(builder, loc, op0);
2545 }
2546 }
2547
2548 return info;
2549}
2550
2553 if (!expr->isGLValue()) {
2554 // ?: here should be an aggregate.
2555 assert(hasAggregateEvaluationKind(expr->getType()) &&
2556 "Unexpected conditional operator!");
2557 return emitAggExprToLValue(expr);
2558 }
2559
2560 OpaqueValueMapping binding(*this, expr);
2561 if (std::optional<LValue> res =
2562 handleConditionalOperatorLValueSimpleCase(*this, expr))
2563 return *res;
2564
2565 ConditionalInfo info =
2566 emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
2567 return emitLValueOrThrowExpression(cgf, e);
2568 });
2569
2570 if ((info.lhs && !info.lhs->isSimple()) ||
2571 (info.rhs && !info.rhs->isSimple())) {
2572 cgm.errorNYI(expr->getSourceRange(),
2573 "unsupported conditional operator with non-simple lvalue");
2574 return LValue();
2575 }
2576
2577 if (info.lhs && info.rhs) {
2578 Address lhsAddr = info.lhs->getAddress();
2579 Address rhsAddr = info.rhs->getAddress();
2580 Address result(info.result, lhsAddr.getElementType(),
2581 std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
2582 AlignmentSource alignSource =
2583 std::max(info.lhs->getBaseInfo().getAlignmentSource(),
2584 info.rhs->getBaseInfo().getAlignmentSource());
2586 return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
2587 }
2588
2589 assert((info.lhs || info.rhs) &&
2590 "both operands of glvalue conditional are throw-expressions?");
2591 return info.lhs ? *info.lhs : *info.rhs;
2592}
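// --- Illustrative example (not part of CIRGenExpr.cpp) ---
// A sketch of a glvalue conditional whose condition is not a constant: both
// arms are simple lvalues, so the resulting address is the cir.ternary result,
// using the smaller of the two alignments and the merged alignment source.

void assignEither(bool c, int &x, int &y) {
  (c ? x : y) = 0; // glvalue ?: emitted via emitConditionalOperatorLValue
}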
2593
2594/// An LValue is a candidate for having its loads and stores be made atomic if
2595/// we are operating under /volatile:ms *and* the LValue itself is volatile and
2596 /// such an operation can be performed without a libcall.
2598 if (!cgm.getLangOpts().MSVolatile)
2599 return false;
2600
2601 cgm.errorNYI("LValueSuitableForInlineAtomic LangOpts MSVolatile");
2602 return false;
2603}
#define V(N, I)
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e, GlobalDecl gd)
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static cir::IntAttr getConstantIndexOrNull(mlir::Value idx)
static const Expr * getSimpleArrayDecayOperand(const Expr *e)
If the specified expr is a simple decay from an array to pointer, return the array subexpression.
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd)
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e, const VarDecl *vd)
static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf, mlir::Location beginLoc, mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd, mlir::Value thisValue)
static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd)
Defines the clang::Expr interface and subclasses for C++ expressions.
__device__ __2f16 b
__device__ __2f16 float c
static OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block)
cir::GetMemberOp createGetMember(mlir::Location loc, mlir::Type resultTy, mlir::Value base, llvm::StringRef name, unsigned index)
cir::PointerType getPointerTo(mlir::Type ty)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CanQualType BoolTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
CanQualType getCanonicalTagType(const TagDecl *TD) const
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4287
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4465
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4471
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4477
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2721
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2776
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2750
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:2764
SourceLocation getEndLoc() const
Definition Expr.h:2767
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3722
QualType getElementType() const
Definition TypeBase.h:3734
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:3972
Expr * getLHS() const
Definition Expr.h:4022
Expr * getRHS() const
Definition Expr.h:4024
Opcode getOpcode() const
Definition Expr.h:4017
mlir::Value getPointer() const
Definition Address.h:82
mlir::Type getElementType() const
Definition Address.h:109
static Address invalid()
Definition Address.h:67
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:117
mlir::Type getType() const
Definition Address.h:101
bool isValid() const
Definition Address.h:68
mlir::Operation * getDefiningOp() const
Get the operation which defines this address.
Definition Address.h:120
An aggregate value slot.
IsZeroed_t isZeroed() const
IsDestructed_t
This is set to true if the slot might be aliased and it's not undefined behavior to access it through...
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
Address createElementBitCast(mlir::Location loc, Address addr, mlir::Type destType)
Cast the element type of the given address to a different type, preserving information like the align...
mlir::Value getArrayElement(mlir::Location arrayLocBegin, mlir::Location arrayLocEnd, mlir::Value arrayPtr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
Create a cir.ptr_stride operation to get access to an array element.
Abstract information about a function or function prototype.
Definition CIRGenCall.h:27
bool isPseudoDestructor() const
Definition CIRGenCall.h:121
void setFunctionPointer(mlir::Operation *functionPtr)
Definition CIRGenCall.h:183
const clang::FunctionDecl * getBuiltinDecl() const
Definition CIRGenCall.h:97
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CIRGenCall.h:125
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:90
unsigned getBuiltinID() const
Definition CIRGenCall.h:101
static CIRGenCallee forBuiltin(unsigned builtinID, const clang::FunctionDecl *builtinDecl)
Definition CIRGenCall.h:106
mlir::Operation * getFunctionPointer() const
Definition CIRGenCall.h:145
static CIRGenCallee forPseudoDestructor(const clang::CXXPseudoDestructorExpr *expr)
Definition CIRGenCall.h:115
An object to manage conditionally-evaluated expressions.
static ConstantEmission forValue(mlir::TypedAttr c)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-vlaue withi the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitMemberExpr(const MemberExpr *e)
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
LValue emitLValueForLambdaField(const FieldDecl *field)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, bool isInit=false, bool isNontemporal=false)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
mlir::Type convertTypeForMem(QualType t)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
LValue emitAggExprToLValue(const Expr *e)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind op, bool isPre)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, excpet that if the Field is a reference, this will return the address of the...
LValue emitCallExprLValue(const clang::CallExpr *e)
mlir::Value emitScalarExpr(const clang::Expr *e)
Emit the computation of the specified expression of scalar type.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
mlir::MLIRContext & getMLIRContext()
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
void emitCXXThrowExpr(const CXXThrowExpr *e)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
LValue emitPredefinedLValue(const PredefinedExpr *e)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates a alloca and inserts it into the entry block of the current region.
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignmen and cast it to the defa...
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
CIRGenBuilderTy & getBuilder()
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
mlir::Value getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty={}, ForDefinition_t isForDefinition=NotForDefinition)
Return the mlir::Value for the address of the given global variable.
cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant)
This class handles record and union layout info while lowering AST types to CIR types.
cir::RecordType getCIRType() const
Return the "complete object" LLVM type associated with this record.
const CIRGenBitFieldInfo & getBitFieldInfo(const clang::FieldDecl *fd) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getCIRFieldNo(const clang::FieldDecl *fd) const
Return cir::RecordType element number that corresponds to the field FD.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
mlir::Attribute emitAbstract(const Expr *e, QualType destType)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
AlignmentSource getAlignmentSource() const
void mergeForCast(const LValueBaseInfo &info)
const clang::Qualifiers & getQuals() const
mlir::Value getVectorIdx() const
bool isVectorElt() const
Address getAddress() const
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
static LValue makeVectorElt(Address vecAddress, mlir::Value index, clang::QualType t, LValueBaseInfo baseInfo)
unsigned getVRQualifiers() const
clang::QualType getType() const
static LValue makeBitfield(Address addr, const CIRGenBitFieldInfo &info, clang::QualType type, LValueBaseInfo baseInfo)
Create a new object to represent a bit-field access.
mlir::Value getPointer() const
bool isVolatileQualified() const
bool isBitField() const
Address getVectorAddress() const
clang::CharUnits getAlignment() const
LValueBaseInfo getBaseInfo() const
bool isVolatile() const
const CIRGenBitFieldInfo & getBitFieldInfo() const
Address getBitFieldAddress() const
bool isSimple() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:82
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:90
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:56
bool isScalar() const
Definition CIRGenValue.h:49
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:254
Represents a call to a C++ constructor.
Definition ExprCXX.h:1549
bool isElidable() const
Whether this construction is elidable.
Definition ExprCXX.h:1618
Expr * getArg(unsigned Arg)
Return the specified argument.
Definition ExprCXX.h:1692
bool requiresZeroInitialization() const
Whether this construction first requires zero-initialization before the initializer is called.
Definition ExprCXX.h:1651
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will (ultimately) call.
Definition ExprCXX.h:1612
CXXConstructionKind getConstructionKind() const
Determine whether this constructor is actually constructing a base class (rather than a complete obje...
Definition ExprCXX.h:1660
Represents a C++ constructor within a class.
Definition DeclCXX.h:2604
bool isDefaultConstructor() const
Whether this constructor is a default constructor (C++ [class.ctor]p5), which can be used to default-...
Definition DeclCXX.cpp:2999
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:179
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2129
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2255
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1366
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3060
Expr * getCallee()
Definition Expr.h:3024
arg_range arguments()
Definition Expr.h:3129
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3610
CastKind getCastKind() const
Definition Expr.h:3654
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3697
bool changesVolatileQualification() const
Return.
Definition Expr.h:3744
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1946
Expr * getSubExpr()
Definition Expr.h:3660
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3275
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3539
bool isFileScope() const
Definition Expr.h:3571
const Expr * getInitializer() const
Definition Expr.h:3567
ConditionalOperator - The ?
Definition Expr.h:4325
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1474
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:484
ValueDecl * getDecl()
Definition Expr.h:1338
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1468
SourceLocation getLocation() const
Definition Expr.h:1346
SourceLocation getLocation() const
Definition DeclBase.h:439
DeclContext * getDeclContext()
Definition DeclBase.h:448
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:80
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:444
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1542
bool isTemporaryObject(ASTContext &Ctx, const CXXRecordDecl *TempTy) const
Determine whether the result of this expression is a temporary object of the given class type.
Definition Expr.cpp:3248
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3263
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4815
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
bool isZeroSize(const ASTContext &Ctx) const
Determine if this field is a subobject of zero size, that is, either a zero-length bit-field or a fie...
Definition Decl.cpp:4755
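A short sketch of the FieldDecl queries above; the predicate name and its policy (treating bit-fields and zero-sized fields as special cases) are illustrative assumptions.
#include "clang/AST/Decl.h"
// Hypothetical predicate for illustration: bit-fields and zero-sized
// subobjects cannot be addressed as a plain member access.
static bool needsSpecialFieldHandling(const clang::FieldDecl *fd,
                                      const clang::ASTContext &ctx) {
  return fd->isBitField() || fd->isZeroSize(ctx);
}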
Represents a function declaration or definition.
Definition Decl.h:2000
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition Decl.h:2377
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g. 'int foo(int)' or 'int foo(void)'.
Definition TypeBase.h:5254
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4922
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4947
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4939
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4972
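A sketch of branching on getStorageDuration for a materialized temporary; the helper name and the grouping of durations are assumptions for illustration only.
#include "clang/AST/ExprCXX.h"
#include "llvm/Support/ErrorHandling.h"
// Hypothetical helper for illustration only.
static bool isLifetimeExtendedToGlobal(const clang::MaterializeTemporaryExpr *mte) {
  switch (mte->getStorageDuration()) {
  case clang::SD_Static:
  case clang::SD_Thread:
    return true; // extended past the enclosing full-expression
  case clang::SD_FullExpression:
  case clang::SD_Automatic:
  case clang::SD_Dynamic:
    return false;
  }
  llvm_unreachable("unknown storage duration");
}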
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3298
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3381
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3522
Expr * getBase() const
Definition Expr.h:3375
bool isArrow() const
Definition Expr.h:3482
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3493
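A minimal sketch of the MemberExpr accessors above, distinguishing a.x from p->x; the helper name and out-parameter are illustrative assumptions.
#include "clang/AST/Expr.h"
// Hypothetical helper for illustration only.
static const clang::Expr *memberBase(const clang::MemberExpr *me, bool &isArrowOut) {
  isArrowOut = me->isArrow(); // true for p->x, where the base is a pointer
  return me->getBase();
}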
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3653
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
Represents a C++ nested name specifier, such as "::std::vector<int>::".
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1228
bool isUnique() const
Definition Expr.h:1236
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition Expr.h:2005
StringRef getIdentKindName() const
Definition Expr.h:2062
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2040
StringLiteral * getFunctionName()
Definition Expr.h:2049
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8318
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1179
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
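A small sketch combining the QualType and Qualifiers queries above; the helper names needsCleanup and addConst are illustrative assumptions, not code from this file.
#include "clang/AST/Type.h"
// Hypothetical helpers for illustration only.
static bool needsCleanup(clang::QualType ty) {
  // A nonzero DestructionKind means a destructor, ARC release, or similar must run.
  return ty.isDestructedType() != clang::QualType::DK_none;
}
static clang::QualType addConst(clang::QualType ty) {
  return ty.withCVRQualifiers(clang::Qualifiers::Const);
}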
Represents a struct/union/class.
Definition Decl.h:4312
Encodes a location in the source.
Stmt - This represents one statement.
Definition Stmt.h:85
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
StringLiteral - This represents a string literal expression, e.g. "foo" or L"bar" (wide strings).
Definition Expr.h:1799
bool isUnion() const
Definition Decl.h:3922
Exposes information about the current target.
Definition TargetInfo.h:226
virtual StringRef getABI() const
Get the ABI currently in use.
bool isVoidType() const
Definition TypeBase.h:8871
bool isBooleanType() const
Definition TypeBase.h:9001
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9167
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8614
bool isFunctionPointerType() const
Definition TypeBase.h:8582
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2337
bool isConstantMatrixType() const
Definition TypeBase.h:8676
bool isPointerType() const
Definition TypeBase.h:8515
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
bool isReferenceType() const
Definition TypeBase.h:8539
bool isVariableArrayType() const
Definition TypeBase.h:8626
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isAnyComplexType() const
Definition TypeBase.h:8650
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9044
bool isAtomicType() const
Definition TypeBase.h:8697
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2800
bool isFunctionType() const
Definition TypeBase.h:8511
bool isVectorType() const
Definition TypeBase.h:8654
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation -- i.e., it is a boolean type,...
Definition Type.cpp:2354
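A minimal sketch of the Type predicates and getAs<> pattern listed above; the helper name pointeeOrSelf is an illustrative assumption.
#include "clang/AST/Type.h"
// Hypothetical helper for illustration only: look through one level of pointer.
static clang::QualType pointeeOrSelf(clang::QualType ty) {
  if (const auto *ptrTy = ty->getAs<clang::PointerType>())
    return ptrTy->getPointeeType();
  return ty;
}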
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2326
static bool isPrefix(Opcode Op)
isPrefix - Return true if this is a prefix operation, like --x.
Definition Expr.h:2319
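A short sketch of classifying increment/decrement operators with the UnaryOperator helpers above; the function name isPreIncrement is an illustrative assumption.
#include "clang/AST/Expr.h"
// Hypothetical helper for illustration only.
static bool isPreIncrement(const clang::UnaryOperator *uo) {
  clang::UnaryOperatorKind op = uo->getOpcode();
  return clang::UnaryOperator::isIncrementOp(op) &&
         clang::UnaryOperator::isPrefix(op); // ++x, but not x++ or --x
}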
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
TLSKind getTLSKind() const
Definition Decl.cpp:2168
bool hasInit() const
Definition Decl.cpp:2398
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2366
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1184
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:952
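A sketch of the VarDecl storage queries above; whether a dynamically initialized TLS variable actually requires an init/wrapper call depends on the target ABI, so the helper name is an assumption for illustration only.
#include "clang/AST/Decl.h"
// Hypothetical helper for illustration only.
static bool isDynamicTLSGlobal(const clang::VarDecl *vd) {
  return !vd->hasLocalStorage() &&
         vd->getTLSKind() == clang::VarDecl::TLS_Dynamic;
}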
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3966
Represents a GCC generic vector type.
Definition TypeBase.h:4175
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static AlignmentSource getFieldAlignmentSource(AlignmentSource source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
The JSON file list parser is used to communicate input to InstallAPI.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ SC_Register
Definition Specifiers.h:257
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:342
@ SD_Static
Static storage duration.
Definition Specifiers.h:343
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:340
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:341
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:344
U cast(CodeGen::Address addr)
Definition Address.h:327
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:180
static bool weakRefReference()
static bool objCLifetime()
static bool emitLifetimeMarkers()
static bool opLoadEmitScalarRangeCheck()
static bool addressSpace()
static bool opLoadStoreThreadLocal()
static bool opAllocaNonGC()
static bool opGlobalThreadLocal()
static bool opAllocaOpenMPThreadPrivate()
static bool preservedAccessIndexRegion()
static bool mergeAllConstants()
static bool opLoadStoreAtomic()
static bool opLoadStoreTbaa()
static bool cgFPOptionsRAII()
static bool opCallChain()
static bool opAllocaImpreciseLifetime()
static bool opAllocaStaticLocal()
static bool opAllocaTLS()
static bool emitCheckedInBoundsGEP()
static bool attributeNoBuiltin()
static bool setObjCGCLValueClass()
static bool cirgenABIInfo()
static bool opLoadStoreObjC()
static bool opCallArgEvaluationOrder()
static bool lambdaCaptures()
static bool insertBuiltinUnpredictable()
static bool opCallMustTail()
static bool shouldReverseUnaryCondOnBoolExpr()
static bool tryEmitAsConstant()
static bool addressIsKnownNonNull()
static bool astVarDeclInterface()
static bool cgCapturedStmtInfo()
static bool opAllocaEscapeByReference()
static bool opLoadStoreNontemporal()
static bool opCallFnInfoOpts()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Record with information about how a bitfield should be accessed.
unsigned volatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned volatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
cir::TargetAddressSpaceAttr getCIRAllocaAddressSpace() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:612
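A minimal sketch of EvaluateAsRValue together with the EvalResult fields listed above; the helper name tryFoldToConstant and its parameters are illustrative assumptions.
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
// Hypothetical helper for illustration only: try to constant-fold an expression.
static bool tryFoldToConstant(const clang::Expr *e, const clang::ASTContext &ctx,
                              clang::APValue &out) {
  clang::Expr::EvalResult result;
  if (!e->EvaluateAsRValue(result, ctx) || result.HasSideEffects)
    return false;
  out = result.Val; // the folded value
  return true;
}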