CIRGenExpr.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "Address.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/CharUnits.h"
22#include "clang/AST/Decl.h"
23#include "clang/AST/Expr.h"
24#include "clang/AST/ExprCXX.h"
27#include <optional>
28
29using namespace clang;
30using namespace clang::CIRGen;
31using namespace cir;
32
33/// Drill down to the storage of a field within a record. The resulting
34/// address doesn't necessarily have the right type.
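// Editorial note (illustrative, not part of the original file): for a field
// access such as
//   struct Point { int x; int y; };
//   int get_y(Point &p) { return p.y; }
// this helper lowers the access to a cir.get_member operation on the base
// record pointer, roughly (the printed CIR syntax is an approximation):
//   %1 = cir.get_member %0[1] {name = "y"}
//        : !cir.ptr<!rec_Point> -> !cir.ptr<!s32i>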
35Address CIRGenFunction::emitAddrOfFieldStorage(Address base,
36 const FieldDecl *field,
37 llvm::StringRef fieldName,
38 unsigned fieldIndex) {
39 if (field->isZeroSize(getContext())) {
40 cgm.errorNYI(field->getSourceRange(),
41 "emitAddrOfFieldStorage: zero-sized field");
42 return Address::invalid();
43 }
44
45 mlir::Location loc = getLoc(field->getLocation());
46
47 mlir::Type fieldType = convertType(field->getType());
48 auto fieldPtr = cir::PointerType::get(fieldType);
49 // In most cases fieldName is the same as field->getName(), but lambdas do
50 // not currently carry the field name, so it can be passed down from the
51 // CaptureStmt.
52 cir::GetMemberOp memberAddr = builder.createGetMember(
53 loc, fieldPtr, base.getPointer(), fieldName, fieldIndex);
54
55 // Retrieve layout information, compute alignment and return the final
56 // address.
57 const RecordDecl *rec = field->getParent();
58 const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
59 unsigned idx = layout.getCIRFieldNo(field);
60 CharUnits offset = CharUnits::fromQuantity(
61 layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
62 return Address(memberAddr, base.getAlignment().alignmentAtOffset(offset));
63}
64
65/// Given an expression of pointer type, try to
66/// derive a more accurate bound on the alignment of the pointer.
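// Editorial note (illustrative, not from the original file): given
//   alignas(16) int buf[4];
//   int *p = buf;            // CK_ArrayToPointerDecay
// the array-decay case below lets the returned Address carry the 16-byte
// alignment of 'buf' rather than falling back to the natural alignment of
// the pointee type.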
67Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
68 LValueBaseInfo *baseInfo) {
69 // We allow this with ObjC object pointers because of fragile ABIs.
70 assert(expr->getType()->isPointerType() ||
71 expr->getType()->isObjCObjectPointerType());
72 expr = expr->IgnoreParens();
73
74 // Casts:
75 if (auto const *ce = dyn_cast<CastExpr>(expr)) {
76 if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
77 cgm.emitExplicitCastExprType(ece);
78
79 switch (ce->getCastKind()) {
80 // Non-converting casts (but not C's implicit conversion from void*).
81 case CK_BitCast:
82 case CK_NoOp:
83 case CK_AddressSpaceConversion: {
84 if (const auto *ptrTy =
85 ce->getSubExpr()->getType()->getAs<PointerType>()) {
86 if (ptrTy->getPointeeType()->isVoidType())
87 break;
88
89 LValueBaseInfo innerBaseInfo;
91 Address addr =
92 emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
93 if (baseInfo)
94 *baseInfo = innerBaseInfo;
95
96 if (isa<ExplicitCastExpr>(ce)) {
97 LValueBaseInfo targetTypeBaseInfo;
98
99 const QualType pointeeType = expr->getType()->getPointeeType();
100 const CharUnits align =
101 cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);
102
103 // If the source l-value is opaque, honor the alignment of the
104 // casted-to type.
105 if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
106 if (baseInfo)
107 baseInfo->mergeForCast(targetTypeBaseInfo);
108 addr = Address(addr.getPointer(), addr.getElementType(), align);
109 }
110 }
111
113
114 const mlir::Type eltTy =
115 convertTypeForMem(expr->getType()->getPointeeType());
116 addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
117 addr, eltTy);
119
120 return addr;
121 }
122 break;
123 }
124
125 // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
126 case CK_ArrayToPointerDecay:
127 return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);
128
129 case CK_UncheckedDerivedToBase:
130 case CK_DerivedToBase: {
133 Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
134 const CXXRecordDecl *derived =
135 ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
136 return getAddressOfBaseClass(addr, derived, ce->path(),
138 ce->getExprLoc());
139 }
140
141 case CK_AnyPointerToBlockPointerCast:
142 case CK_BaseToDerived:
143 case CK_BaseToDerivedMemberPointer:
144 case CK_BlockPointerToObjCPointerCast:
145 case CK_BuiltinFnToFnPtr:
146 case CK_CPointerToObjCPointerCast:
147 case CK_DerivedToBaseMemberPointer:
148 case CK_Dynamic:
149 case CK_FunctionToPointerDecay:
150 case CK_IntegralToPointer:
151 case CK_LValueToRValue:
152 case CK_LValueToRValueBitCast:
153 case CK_NullToMemberPointer:
154 case CK_NullToPointer:
155 case CK_ReinterpretMemberPointer:
156 // Common pointer conversions, nothing to do here.
157 // TODO: Is there any reason to treat base-to-derived conversions
158 // specially?
159 break;
160
161 case CK_ARCConsumeObject:
162 case CK_ARCExtendBlockObject:
163 case CK_ARCProduceObject:
164 case CK_ARCReclaimReturnedObject:
165 case CK_AtomicToNonAtomic:
166 case CK_BooleanToSignedIntegral:
167 case CK_ConstructorConversion:
168 case CK_CopyAndAutoreleaseBlockObject:
169 case CK_Dependent:
170 case CK_FixedPointCast:
171 case CK_FixedPointToBoolean:
172 case CK_FixedPointToFloating:
173 case CK_FixedPointToIntegral:
174 case CK_FloatingCast:
175 case CK_FloatingComplexCast:
176 case CK_FloatingComplexToBoolean:
177 case CK_FloatingComplexToIntegralComplex:
178 case CK_FloatingComplexToReal:
179 case CK_FloatingRealToComplex:
180 case CK_FloatingToBoolean:
181 case CK_FloatingToFixedPoint:
182 case CK_FloatingToIntegral:
183 case CK_HLSLAggregateSplatCast:
184 case CK_HLSLArrayRValue:
185 case CK_HLSLElementwiseCast:
186 case CK_HLSLVectorTruncation:
187 case CK_IntToOCLSampler:
188 case CK_IntegralCast:
189 case CK_IntegralComplexCast:
190 case CK_IntegralComplexToBoolean:
191 case CK_IntegralComplexToFloatingComplex:
192 case CK_IntegralComplexToReal:
193 case CK_IntegralRealToComplex:
194 case CK_IntegralToBoolean:
195 case CK_IntegralToFixedPoint:
196 case CK_IntegralToFloating:
197 case CK_LValueBitCast:
198 case CK_MatrixCast:
199 case CK_MemberPointerToBoolean:
200 case CK_NonAtomicToAtomic:
201 case CK_ObjCObjectLValueCast:
202 case CK_PointerToBoolean:
203 case CK_PointerToIntegral:
204 case CK_ToUnion:
205 case CK_ToVoid:
206 case CK_UserDefinedConversion:
207 case CK_VectorSplat:
208 case CK_ZeroToOCLOpaqueType:
209 llvm_unreachable("unexpected cast for emitPointerWithAlignment");
210 }
211 }
212
213 // Unary &
214 if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
215 // TODO(cir): maybe we should use cir.unary for pointers here instead.
216 if (uo->getOpcode() == UO_AddrOf) {
217 LValue lv = emitLValue(uo->getSubExpr());
218 if (baseInfo)
219 *baseInfo = lv.getBaseInfo();
221 return lv.getAddress();
222 }
223 }
224
225 // std::addressof and variants.
226 if (auto const *call = dyn_cast<CallExpr>(expr)) {
227 switch (call->getBuiltinCallee()) {
228 default:
229 break;
230 case Builtin::BIaddressof:
231 case Builtin::BI__addressof:
232 case Builtin::BI__builtin_addressof: {
233 cgm.errorNYI(expr->getSourceRange(),
234 "emitPointerWithAlignment: builtin addressof");
235 return Address::invalid();
236 }
237 }
238 }
239
240 // Otherwise, use the alignment of the type.
241 return makeNaturalAddressForPointer(
242 emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
243 /*forPointeeType=*/true, baseInfo);
244}
245
246void CIRGenFunction::emitStoreThroughLValue(RValue src, LValue dst,
247 bool isInit) {
248 if (!dst.isSimple()) {
249 if (dst.isVectorElt()) {
250 // Read/modify/write the vector, inserting the new element
251 const mlir::Location loc = dst.getVectorPointer().getLoc();
252 const mlir::Value vector =
253 builder.createLoad(loc, dst.getVectorAddress());
254 const mlir::Value newVector = builder.create<cir::VecInsertOp>(
255 loc, vector, src.getValue(), dst.getVectorIdx());
256 builder.createStore(loc, newVector, dst.getVectorAddress());
257 return;
258 }
259
260 assert(dst.isBitField() && "Unknown LValue type");
262 return;
263
264 cgm.errorNYI(dst.getPointer().getLoc(),
265 "emitStoreThroughLValue: non-simple lvalue");
266 return;
267 }
268
270
271 assert(src.isScalar() && "Can't emit an aggregate store with this method");
272 emitStoreOfScalar(src.getValue(), dst, isInit);
273}
274
275static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
276 const VarDecl *vd) {
277 QualType t = e->getType();
278
279 // If it's thread_local, emit a call to its wrapper function instead.
281 if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
282 cgf.cgm.errorNYI(e->getSourceRange(),
283 "emitGlobalVarDeclLValue: thread_local variable");
284
285 // Check if the variable is marked as declare target with link clause in
286 // device codegen.
287 if (cgf.getLangOpts().OpenMP)
288 cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");
289
290 // Traditional LLVM codegen handles thread-local variables separately; CIR
291 // handles them as part of getAddrOfGlobalVar.
292 mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);
293
295 mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
296 cir::PointerType realPtrTy = cgf.getBuilder().getPointerTo(realVarTy);
297 if (realPtrTy != v.getType())
298 v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);
299
300 CharUnits alignment = cgf.getContext().getDeclAlign(vd);
301 Address addr(v, realVarTy, alignment);
302 LValue lv;
303 if (vd->getType()->isReferenceType())
304 cgf.cgm.errorNYI(e->getSourceRange(),
305 "emitGlobalVarDeclLValue: reference type");
306 else
307 lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
309 return lv;
310}
311
312void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
313 bool isVolatile, QualType ty,
314 bool isInit, bool isNontemporal) {
316
317 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
318 // Boolean vectors use `iN` as storage type.
319 if (clangVecTy->isExtVectorBoolType())
320 cgm.errorNYI(addr.getPointer().getLoc(),
321 "emitStoreOfScalar ExtVectorBoolType");
322
323 // Handle vectors of size 3 like size 4 for better performance.
324 const mlir::Type elementType = addr.getElementType();
325 const auto vecTy = cast<cir::VectorType>(elementType);
326
327 // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it is upstreamed.
329 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
330 cgm.errorNYI(addr.getPointer().getLoc(),
331 "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
332 }
333
334 value = emitToMemory(value, ty);
335
337
338 // Update the alloca with more info on initialization.
339 assert(addr.getPointer() && "expected pointer to exist");
340 auto srcAlloca = addr.getDefiningOp<cir::AllocaOp>();
341 if (currVarDecl && srcAlloca) {
342 const VarDecl *vd = currVarDecl;
343 assert(vd && "VarDecl expected");
344 if (vd->hasInit())
345 srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
346 }
347
348 assert(currSrcLoc && "must pass in source location");
349 builder.createStore(*currSrcLoc, value, addr, isVolatile);
350
351 if (isNontemporal) {
352 cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
353 return;
354 }
355
357}
358
359// TODO: Replace this with a proper TargetInfo function call.
360/// Helper method to check if the underlying ABI is AAPCS
361static bool isAAPCS(const TargetInfo &targetInfo) {
362 return targetInfo.getABI().starts_with("aapcs");
363}
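// Editorial note: a prefix match is used above because ARM ABI names such as
// "aapcs" and "aapcs-vfp" should both be treated as AAPCS.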
364
365mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src,
366 LValue dst) {
367
368 const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
369 mlir::Type resLTy = convertTypeForMem(dst.getType());
370 Address ptr = dst.getBitFieldAddress();
371
372 bool useVolatile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
373 dst.isVolatileQualified() &&
374 info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());
375
376 mlir::Value dstAddr = dst.getAddress().getPointer();
377
378 return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr,
379 ptr.getElementType(), src.getValue(), info,
380 dst.isVolatileQualified(), useVolatile);
381}
382
383RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
384 const CIRGenBitFieldInfo &info = lv.getBitFieldInfo();
385
386 // Get the output type.
387 mlir::Type resLTy = convertType(lv.getType());
388 Address ptr = lv.getBitFieldAddress();
389
390 bool useVolatile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
391 isAAPCS(cgm.getTarget());
392
393 mlir::Value field =
394 builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
395 info, lv.isVolatile(), useVolatile);
397 return RValue::get(field);
398}
399
400Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
401 const FieldDecl *field,
402 mlir::Type fieldType,
403 unsigned index) {
404 mlir::Location loc = getLoc(field->getLocation());
405 cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
406 auto rec = cast<cir::RecordType>(base.getAddress().getElementType());
407 cir::GetMemberOp sea = getBuilder().createGetMember(
408 loc, fieldPtr, base.getPointer(), field->getName(),
409 rec.isUnion() ? field->getFieldIndex() : index);
410 CharUnits offset = CharUnits::fromQuantity(
411 rec.getElementOffset(cgm.getDataLayout().layout, index));
412 return Address(sea, base.getAlignment().alignmentAtOffset(offset));
413}
414
415LValue CIRGenFunction::emitLValueForBitField(LValue base,
416 const FieldDecl *field) {
417 LValueBaseInfo baseInfo = base.getBaseInfo();
418 const CIRGenRecordLayout &layout =
419 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
420 const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
421
423
424 unsigned idx = layout.getCIRFieldNo(field);
425 Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);
426
427 mlir::Location loc = getLoc(field->getLocation());
428 if (addr.getElementType() != info.storageType)
429 addr = builder.createElementBitCast(loc, addr, info.storageType);
430
431 QualType fieldType =
432 field->getType().withCVRQualifiers(base.getVRQualifiers());
433 // TODO(cir): Support TBAA for bit fields.
435 LValueBaseInfo fieldBaseInfo(baseInfo.getAlignmentSource());
436 return LValue::makeBitfield(addr, info, fieldType, fieldBaseInfo);
437}
438
439LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) {
440 LValueBaseInfo baseInfo = base.getBaseInfo();
441
442 if (field->isBitField())
443 return emitLValueForBitField(base, field);
444
445 QualType fieldType = field->getType();
446 const RecordDecl *rec = field->getParent();
447 AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
448 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));
450
451 Address addr = base.getAddress();
452 if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
453 if (cgm.getCodeGenOpts().StrictVTablePointers &&
454 classDecl->isDynamicClass()) {
455 cgm.errorNYI(field->getSourceRange(),
456 "emitLValueForField: strict vtable for dynamic class");
457 }
458 }
459
460 unsigned recordCVR = base.getVRQualifiers();
461
462 llvm::StringRef fieldName = field->getName();
463 unsigned fieldIndex;
464 if (cgm.lambdaFieldToName.count(field))
465 fieldName = cgm.lambdaFieldToName[field];
466
467 if (rec->isUnion())
468 fieldIndex = field->getFieldIndex();
469 else {
470 const CIRGenRecordLayout &layout =
471 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
472 fieldIndex = layout.getCIRFieldNo(field);
473 }
474
475 addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);
477
478 // If this is a reference field, load the reference right now.
479 if (fieldType->isReferenceType()) {
481 LValue refLVal = makeAddrLValue(addr, fieldType, fieldBaseInfo);
482 if (recordCVR & Qualifiers::Volatile)
483 refLVal.getQuals().addVolatile();
484 addr = emitLoadOfReference(refLVal, getLoc(field->getSourceRange()),
485 &fieldBaseInfo);
486
487 // Qualifiers on the struct don't apply to the referencee.
488 recordCVR = 0;
489 fieldType = fieldType->getPointeeType();
490 }
491
492 if (field->hasAttr<AnnotateAttr>()) {
493 cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
494 return LValue();
495 }
496
497 LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
498 lv.getQuals().addCVRQualifiers(recordCVR);
499
500 // __weak attribute on a field is ignored.
502 cgm.errorNYI(field->getSourceRange(),
503 "emitLValueForField: __weak attribute");
504 return LValue();
505 }
506
507 return lv;
508}
509
510LValue CIRGenFunction::emitLValueForFieldInitialization(
511 LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName) {
512 QualType fieldType = field->getType();
513
514 if (!fieldType->isReferenceType())
515 return emitLValueForField(base, field);
516
517 const CIRGenRecordLayout &layout =
518 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
519 unsigned fieldIndex = layout.getCIRFieldNo(field);
520
521 Address v =
522 emitAddrOfFieldStorage(base.getAddress(), field, fieldName, fieldIndex);
523
524 // Make sure that the address is pointing to the right type.
525 mlir::Type memTy = convertTypeForMem(fieldType);
526 v = builder.createElementBitCast(getLoc(field->getSourceRange()), v, memTy);
527
528 // TODO: Generate TBAA information that describes this access as a structure
529 // member access and not just an access to an object of the field's type. This
530 // should be similar to what we do in EmitLValueForField().
531 LValueBaseInfo baseInfo = base.getBaseInfo();
532 AlignmentSource fieldAlignSource = baseInfo.getAlignmentSource();
533 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(fieldAlignSource));
535 return makeAddrLValue(v, fieldType, fieldBaseInfo);
536}
537
538mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
539 // Bool has a different representation in memory than in registers,
540 // but in ClangIR, it is simply represented as a cir.bool value.
541 // This function is here as a placeholder for possible future changes.
542 return value;
543}
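// Editorial note: classic LLVM codegen widens an i1 bool to its i8 in-memory
// representation at this point; ClangIR uses the dedicated !cir.bool type for
// both registers and memory, which is why emitToMemory is currently a no-op.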
544
545void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
546 bool isInit) {
547 if (lvalue.getType()->isConstantMatrixType()) {
548 assert(0 && "NYI: emitStoreOfScalar constant matrix type");
549 return;
550 }
551
552 emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
553 lvalue.getType(), isInit, /*isNontemporal=*/false);
554}
555
556mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
557 QualType ty, SourceLocation loc,
558 LValueBaseInfo baseInfo) {
560 mlir::Type eltTy = addr.getElementType();
561
562 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
563 if (clangVecTy->isExtVectorBoolType()) {
564 cgm.errorNYI(loc, "emitLoadOfScalar: ExtVectorBoolType");
565 return nullptr;
566 }
567
568 const auto vecTy = cast<cir::VectorType>(eltTy);
569
570 // Handle vectors of size 3 like size 4 for better performance.
572 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
573 cgm.errorNYI(addr.getPointer().getLoc(),
574 "emitLoadOfScalar Vec3 & PreserveVec3Type disabled");
575 }
576
578 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
579 if (ty->isAtomicType() || isLValueSuitableForInlineAtomic(atomicLValue))
580 cgm.errorNYI("emitLoadOfScalar: load atomic");
581
582 if (mlir::isa<cir::VoidType>(eltTy))
583 cgm.errorNYI(loc, "emitLoadOfScalar: void type");
584
586
587 mlir::Value loadOp = builder.createLoad(getLoc(loc), addr, isVolatile);
588 if (!ty->isBooleanType() && ty->hasBooleanRepresentation())
589 cgm.errorNYI("emitLoadOfScalar: boolean type with boolean representation");
590
591 return loadOp;
592}
593
594mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue,
595 SourceLocation loc) {
598 return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
599 lvalue.getType(), loc, lvalue.getBaseInfo());
600}
601
602/// Given an expression that represents a value lvalue, this
603/// method emits the address of the lvalue, then loads the result as an rvalue,
604/// returning the rvalue.
605RValue CIRGenFunction::emitLoadOfLValue(LValue lv, SourceLocation loc) {
606 assert(!lv.getType()->isFunctionType());
607 assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");
608
609 if (lv.isBitField())
610 return emitLoadOfBitfieldLValue(lv, loc);
611
612 if (lv.isSimple())
613 return RValue::get(emitLoadOfScalar(lv, loc));
614
615 if (lv.isVectorElt()) {
616 const mlir::Value load =
617 builder.createLoad(getLoc(loc), lv.getVectorAddress());
618 return RValue::get(builder.create<cir::VecExtractOp>(getLoc(loc), load,
619 lv.getVectorIdx()));
620 }
621
622 cgm.errorNYI(loc, "emitLoadOfLValue");
623 return RValue::get(nullptr);
624}
625
626static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
628 return cgm.getAddrOfFunction(gd);
629}
630
631static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd,
632 mlir::Value thisValue) {
633 return cgf.emitLValueForLambdaField(fd, thisValue);
634}
635
636/// Given that we are currently emitting a lambda, emit an l-value for
637/// one of its members.
638///
639LValue CIRGenFunction::emitLValueForLambdaField(const FieldDecl *field,
640 mlir::Value thisValue) {
641 bool hasExplicitObjectParameter = false;
642 const auto *methD = dyn_cast_if_present<CXXMethodDecl>(curCodeDecl);
643 LValue lambdaLV;
644 if (methD) {
645 hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction();
646 assert(methD->getParent()->isLambda());
647 assert(methD->getParent() == field->getParent());
648 }
649 if (hasExplicitObjectParameter) {
650 cgm.errorNYI(field->getSourceRange(), "ExplicitObjectMemberFunction");
651 } else {
652 QualType lambdaTagType =
654 lambdaLV = makeNaturalAlignAddrLValue(thisValue, lambdaTagType);
655 }
656 return emitLValueForField(lambdaLV, field);
657}
658
662
663static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
664 GlobalDecl gd) {
665 const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
666 cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
667 mlir::Location loc = cgf.getLoc(e->getSourceRange());
668 CharUnits align = cgf.getContext().getDeclAlign(fd);
669
671
672 mlir::Type fnTy = funcOp.getFunctionType();
673 mlir::Type ptrTy = cir::PointerType::get(fnTy);
674 mlir::Value addr = cgf.getBuilder().create<cir::GetGlobalOp>(
675 loc, ptrTy, funcOp.getSymName());
676
677 if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
678 fnTy = cgf.convertType(fd->getType());
679 ptrTy = cir::PointerType::get(fnTy);
680
681 addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
682 cir::CastKind::bitcast, addr);
683 }
684
685 return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
687}
688
689/// Determine whether we can emit a reference to \p vd from the current
690/// context, despite not necessarily having seen an odr-use of the variable in
691/// this context.
692/// TODO(cir): This could be shared with classic codegen.
693static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf,
694 const DeclRefExpr *e,
695 const VarDecl *vd) {
696 // For a variable declared in an enclosing scope, do not emit a spurious
697 // reference even if we have a capture, as that will emit an unwarranted
698 // reference to our capture state, and will likely generate worse code than
699 // emitting a local copy.
700 if (e->refersToEnclosingVariableOrCapture())
701 return false;
702
703 // For a local declaration declared in this function, we can always reference
704 // it even if we don't have an odr-use.
705 if (vd->hasLocalStorage()) {
706 return vd->getDeclContext() ==
707 dyn_cast_or_null<DeclContext>(cgf.curCodeDecl);
708 }
709
710 // For a global declaration, we can emit a reference to it if we know
711 // for sure that we are able to emit a definition of it.
712 vd = vd->getDefinition(cgf.getContext());
713 if (!vd)
714 return false;
715
716 // Don't emit a spurious reference if it might be to a variable that only
717 // exists on a different device / target.
718 // FIXME: This is unnecessarily broad. Check whether this would actually be a
719 // cross-target reference.
720 if (cgf.getLangOpts().OpenMP || cgf.getLangOpts().CUDA ||
721 cgf.getLangOpts().OpenCL) {
722 return false;
723 }
724
725 // We can emit a spurious reference only if the linkage implies that we'll
726 // be emitting a non-interposable symbol that will be retained until link
727 // time.
728 switch (cgf.cgm.getCIRLinkageVarDefinition(vd, /*IsConstant=*/false)) {
729 case cir::GlobalLinkageKind::ExternalLinkage:
730 case cir::GlobalLinkageKind::LinkOnceODRLinkage:
731 case cir::GlobalLinkageKind::WeakODRLinkage:
732 case cir::GlobalLinkageKind::InternalLinkage:
733 case cir::GlobalLinkageKind::PrivateLinkage:
734 return true;
735 default:
736 return false;
737 }
738}
739
740LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
741 const NamedDecl *nd = e->getDecl();
742 QualType ty = e->getType();
743
744 assert(e->isNonOdrUse() != NOUR_Unevaluated &&
745 "should not emit an unevaluated operand");
746
747 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
748 // Global Named registers access via intrinsics only
749 if (vd->getStorageClass() == SC_Register && vd->hasAttr<AsmLabelAttr>() &&
750 !vd->isLocalVarDecl()) {
751 cgm.errorNYI(e->getSourceRange(),
752 "emitDeclRefLValue: Global Named registers access");
753 return LValue();
754 }
755
756 if (e->isNonOdrUse() == NOUR_Constant &&
757 (vd->getType()->isReferenceType() ||
758 !canEmitSpuriousReferenceToVariable(*this, e, vd))) {
759 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: NonOdrUse");
760 return LValue();
761 }
762
763 // Check for captured variables.
764 if (e->refersToEnclosingVariableOrCapture()) {
765 vd = vd->getCanonicalDecl();
766 if (FieldDecl *fd = lambdaCaptureFields.lookup(vd))
767 return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
770 }
771 }
772
773 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
774 // Checks for omitted feature handling
781
782 // Check if this is a global variable
783 if (vd->hasLinkage() || vd->isStaticDataMember())
784 return emitGlobalVarDeclLValue(*this, e, vd);
785
786 Address addr = Address::invalid();
787
788 // The variable should generally be present in the local decl map.
789 auto iter = localDeclMap.find(vd);
790 if (iter != localDeclMap.end()) {
791 addr = iter->second;
792 } else {
793 // Otherwise, it might be static local we haven't emitted yet for some
794 // reason; most likely, because it's in an outer function.
795 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: static local");
796 }
797
798 // Drill into reference types.
799 LValue lv =
800 vd->getType()->isReferenceType()
804
805 // Statics are defined as globals, so they are not included in the function's
806 // symbol table.
807 assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
808 "non-static locals should be already mapped");
809
810 return lv;
811 }
812
813 if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
816 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
817 return LValue();
818 }
819 return emitLValue(bd->getBinding());
820 }
821
822 if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
823 LValue lv = emitFunctionDeclLValue(*this, e, fd);
824
825 // Emit debuginfo for the function declaration if the target wants to.
826 if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
828
829 return lv;
830 }
831
832 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
833 return LValue();
834}
835
836mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *e) {
837 QualType boolTy = getContext().BoolTy;
838 SourceLocation loc = e->getExprLoc();
839
841 if (e->getType()->getAs<MemberPointerType>()) {
842 cgm.errorNYI(e->getSourceRange(),
843 "evaluateExprAsBool: member pointer type");
844 return createDummyValue(getLoc(loc), boolTy);
845 }
846
848 if (!e->getType()->isAnyComplexType())
849 return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);
850
851 return emitComplexToScalarConversion(emitComplexExpr(e), e->getType(), boolTy,
852 loc);
853}
854
855LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) {
856 UnaryOperatorKind op = e->getOpcode();
857
858 // __extension__ doesn't affect lvalue-ness.
859 if (op == UO_Extension)
860 return emitLValue(e->getSubExpr());
861
862 switch (op) {
863 case UO_Deref: {
864 QualType t = e->getSubExpr()->getType()->getPointeeType();
865 assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
866
868 LValueBaseInfo baseInfo;
869 Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);
870
871 // Tag 'load' with deref attribute.
872 // FIXME: This misses some dereference cases and has problematic interactions
873 // with other operators.
874 if (auto loadOp = addr.getDefiningOp<cir::LoadOp>())
875 loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));
876
877 LValue lv = makeAddrLValue(addr, t, baseInfo);
880 return lv;
881 }
882 case UO_Real:
883 case UO_Imag: {
884 LValue lv = emitLValue(e->getSubExpr());
885 assert(lv.isSimple() && "real/imag on non-ordinary l-value");
886
887 // __real is valid on scalars. This is a faster way of testing that.
888 // __imag can only produce an rvalue on scalars.
889 if (e->getOpcode() == UO_Real &&
890 !mlir::isa<cir::ComplexType>(lv.getAddress().getElementType())) {
891 assert(e->getSubExpr()->getType()->isArithmeticType());
892 return lv;
893 }
894
895 QualType exprTy = getContext().getCanonicalType(e->getSubExpr()->getType());
896 QualType elemTy = exprTy->castAs<clang::ComplexType>()->getElementType();
897 mlir::Location loc = getLoc(e->getExprLoc());
898 Address component =
899 e->getOpcode() == UO_Real
900 ? builder.createComplexRealPtr(loc, lv.getAddress())
901 : builder.createComplexImagPtr(loc, lv.getAddress());
903 LValue elemLV = makeAddrLValue(component, elemTy);
904 elemLV.getQuals().addQualifiers(lv.getQuals());
905 return elemLV;
906 }
907 case UO_PreInc:
908 case UO_PreDec: {
909 cir::UnaryOpKind kind =
910 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
911 LValue lv = emitLValue(e->getSubExpr());
912
913 assert(e->isPrefix() && "Prefix operator in unexpected state!");
914
915 if (e->getType()->isAnyComplexType()) {
916 emitComplexPrePostIncDec(e, lv, kind, /*isPre=*/true);
917 } else {
918 emitScalarPrePostIncDec(e, lv, kind, /*isPre=*/true);
919 }
920
921 return lv;
922 }
923 case UO_Extension:
924 llvm_unreachable("UnaryOperator extension should be handled above!");
925 case UO_Plus:
926 case UO_Minus:
927 case UO_Not:
928 case UO_LNot:
929 case UO_AddrOf:
930 case UO_PostInc:
931 case UO_PostDec:
932 case UO_Coawait:
933 llvm_unreachable("UnaryOperator of non-lvalue kind!");
934 }
935 llvm_unreachable("Unknown unary operator kind!");
936}
937
938/// If the specified expr is a simple decay from an array to pointer,
939/// return the array subexpression.
940/// FIXME: this could be abstracted into a common AST helper.
941static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
942 // If this isn't just an array->pointer decay, bail out.
943 const auto *castExpr = dyn_cast<CastExpr>(e);
944 if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
945 return nullptr;
946
947 // If this is a decay from variable width array, bail out.
948 const Expr *subExpr = castExpr->getSubExpr();
949 if (subExpr->getType()->isVariableArrayType())
950 return nullptr;
951
952 return subExpr;
953}
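// Editorial example: for 'int a[10]; ... a[i] ...' the base reaches subscript
// emission as an ImplicitCastExpr with CK_ArrayToPointerDecay, and this helper
// returns the underlying expression for 'a'; for a VLA such as 'int v[n]' it
// returns nullptr, so the caller takes the generic pointer path instead.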
954
955static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
956 // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr?
957 if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
958 return constantOp.getValueAttr<cir::IntAttr>();
959 return {};
960}
961
962static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
963 CharUnits eltSize) {
964 // If we have a constant index, we can use the exact offset of the
965 // element we're accessing.
966 if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
967 const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
968 return arrayAlign.alignmentAtOffset(offset);
969 }
970 // Otherwise, use the worst-case alignment for any element.
971 return arrayAlign.alignmentOfArrayElement(eltSize);
972}
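// Editorial worked example: with arrayAlign = 16 bytes and eltSize = 4 bytes,
// a constant index of 4 gives a byte offset of 16, so alignmentAtOffset yields
// a 16-byte-aligned element address; with a non-constant index, the
// conservative alignmentOfArrayElement result of 4 bytes is used instead.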
973
974static QualType getFixedSizeElementType(const ASTContext &astContext,
975 const VariableArrayType *vla) {
976 QualType eltType;
977 do {
978 eltType = vla->getElementType();
979 } while ((vla = astContext.getAsVariableArrayType(eltType)));
980 return eltType;
981}
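// Editorial example: for 'int v[n][k]' the loop above peels both variable
// dimensions and returns 'int', while for 'int m[n][4]' it stops at the
// fixed-size element type 'int[4]'.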
982
983static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf,
984 mlir::Location beginLoc,
985 mlir::Location endLoc, mlir::Value ptr,
986 mlir::Type eltTy, mlir::Value idx,
987 bool shouldDecay) {
988 CIRGenModule &cgm = cgf.getCIRGenModule();
989 // TODO(cir): LLVM codegen emits an inbounds GEP check here; is there
990 // anything that would enhance tracking this later in CIR?
992 return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
993 shouldDecay);
994}
995
996static Address emitArraySubscriptPtr(CIRGenFunction &cgf,
997 mlir::Location beginLoc,
998 mlir::Location endLoc, Address addr,
999 QualType eltType, mlir::Value idx,
1000 mlir::Location loc, bool shouldDecay) {
1001
1002 // Determine the element size of the statically-sized base. This is
1003 // the thing that the indices are expressed in terms of.
1004 if (const VariableArrayType *vla =
1005 cgf.getContext().getAsVariableArrayType(eltType)) {
1006 eltType = getFixedSizeElementType(cgf.getContext(), vla);
1007 }
1008
1009 // We can use that to compute the best alignment of the element.
1010 const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
1011 const CharUnits eltAlign =
1012 getArrayElementAlign(addr.getAlignment(), idx, eltSize);
1013
1015 const mlir::Value eltPtr =
1016 emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
1017 addr.getElementType(), idx, shouldDecay);
1018 const mlir::Type elementType = cgf.convertTypeForMem(eltType);
1019 return Address(eltPtr, elementType, eltAlign);
1020}
1021
1022LValue
1023CIRGenFunction::emitArraySubscriptExpr(const ArraySubscriptExpr *e) {
1024 if (isa<ExtVectorElementExpr>(e->getBase())) {
1025 cgm.errorNYI(e->getSourceRange(),
1026 "emitArraySubscriptExpr: ExtVectorElementExpr");
1028 }
1029
1030 if (getContext().getAsVariableArrayType(e->getType())) {
1031 cgm.errorNYI(e->getSourceRange(),
1032 "emitArraySubscriptExpr: VariableArrayType");
1034 }
1035
1036 if (e->getType()->getAs<ObjCObjectType>()) {
1037 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
1039 }
1040
1041 // The index must always be an integer, which is not an aggregate. Emit it
1042 // in lexical order (this complexity is, sadly, required by C++17).
1043 assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
1044 "index was neither LHS nor RHS");
1045
1046 auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
1047 const mlir::Value idx = emitScalarExpr(e->getIdx());
1048
1049 // Extend or truncate the index type to 32 or 64-bits.
1050 auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
1051 if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
1052 cgm.errorNYI(e->getSourceRange(),
1053 "emitArraySubscriptExpr: index type cast");
1054 return idx;
1055 };
1056
1057 // If the base is a vector type, then we are forming a vector element
1058 // with this subscript.
1059 if (e->getBase()->getType()->isVectorType() &&
1060 !isa<ExtVectorElementExpr>(e->getBase())) {
1061 const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
1062 const LValue lhs = emitLValue(e->getBase());
1063 return LValue::makeVectorElt(lhs.getAddress(), idx, e->getBase()->getType(),
1064 lhs.getBaseInfo());
1065 }
1066
1067 const mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
1068 if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
1069 LValue arrayLV;
1070 if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
1071 arrayLV = emitArraySubscriptExpr(ase);
1072 else
1073 arrayLV = emitLValue(array);
1074
1075 // Propagate the alignment from the array itself to the result.
1076 const Address addr = emitArraySubscriptPtr(
1077 *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
1078 arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1079 /*shouldDecay=*/true);
1080
1081 const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
1082
1083 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1084 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1085 }
1086
1087 return lv;
1088 }
1089
1090 // The base must be a pointer; emit it with an estimate of its alignment.
1091 assert(e->getBase()->getType()->isPointerType() &&
1092 "The base must be a pointer");
1093
1094 LValueBaseInfo eltBaseInfo;
1095 const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
1096 // Propagate the alignment from the array itself to the result.
1097 const Address addr = emitArraySubscriptPtr(
1098 *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
1099 e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1100 /*shouldDecay=*/false);
1101
1102 const LValue lv = LValue::makeAddr(addr, e->getType(), eltBaseInfo);
1103
1104 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1105 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1106 }
1107
1108 return lv;
1109}
1110
1111LValue CIRGenFunction::emitStringLiteralLValue(const StringLiteral *e) {
1112 cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e);
1113 assert(globalOp.getAlignment() && "expected alignment for string literal");
1114 unsigned align = *(globalOp.getAlignment());
1115 mlir::Value addr =
1116 builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
1117 return makeAddrLValue(
1118 Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
1119 e->getType(), AlignmentSource::Decl);
1120}
1121
1122/// Casts are never lvalues unless that cast is to a reference type. If the cast
1123/// is to a reference, we can have the usual lvalue result, otherwise if a cast
1124/// is needed by the code generator in an lvalue context, then it must mean that
1125/// we need the address of an aggregate in order to access one of its members.
1126/// This can happen for all the reasons that casts are permitted with aggregate
1127/// result, including noop aggregate casts, and cast from scalar to union.
1128LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
1129 switch (e->getCastKind()) {
1130 case CK_ToVoid:
1131 case CK_BitCast:
1132 case CK_LValueToRValueBitCast:
1133 case CK_ArrayToPointerDecay:
1134 case CK_FunctionToPointerDecay:
1135 case CK_NullToMemberPointer:
1136 case CK_NullToPointer:
1137 case CK_IntegralToPointer:
1138 case CK_PointerToIntegral:
1139 case CK_PointerToBoolean:
1140 case CK_IntegralCast:
1141 case CK_BooleanToSignedIntegral:
1142 case CK_IntegralToBoolean:
1143 case CK_IntegralToFloating:
1144 case CK_FloatingToIntegral:
1145 case CK_FloatingToBoolean:
1146 case CK_FloatingCast:
1147 case CK_FloatingRealToComplex:
1148 case CK_FloatingComplexToReal:
1149 case CK_FloatingComplexToBoolean:
1150 case CK_FloatingComplexCast:
1151 case CK_FloatingComplexToIntegralComplex:
1152 case CK_IntegralRealToComplex:
1153 case CK_IntegralComplexToReal:
1154 case CK_IntegralComplexToBoolean:
1155 case CK_IntegralComplexCast:
1156 case CK_IntegralComplexToFloatingComplex:
1157 case CK_DerivedToBaseMemberPointer:
1158 case CK_BaseToDerivedMemberPointer:
1159 case CK_MemberPointerToBoolean:
1160 case CK_ReinterpretMemberPointer:
1161 case CK_AnyPointerToBlockPointerCast:
1162 case CK_ARCProduceObject:
1163 case CK_ARCConsumeObject:
1164 case CK_ARCReclaimReturnedObject:
1165 case CK_ARCExtendBlockObject:
1166 case CK_CopyAndAutoreleaseBlockObject:
1167 case CK_IntToOCLSampler:
1168 case CK_FloatingToFixedPoint:
1169 case CK_FixedPointToFloating:
1170 case CK_FixedPointCast:
1171 case CK_FixedPointToBoolean:
1172 case CK_FixedPointToIntegral:
1173 case CK_IntegralToFixedPoint:
1174 case CK_MatrixCast:
1175 case CK_HLSLVectorTruncation:
1176 case CK_HLSLArrayRValue:
1177 case CK_HLSLElementwiseCast:
1178 case CK_HLSLAggregateSplatCast:
1179 llvm_unreachable("unexpected cast lvalue");
1180
1181 case CK_Dependent:
1182 llvm_unreachable("dependent cast kind in IR gen!");
1183
1184 case CK_BuiltinFnToFnPtr:
1185 llvm_unreachable("builtin functions are handled elsewhere");
1186
1187 // These are never l-values; just use the aggregate emission code.
1188 case CK_NonAtomicToAtomic:
1189 case CK_AtomicToNonAtomic:
1190 case CK_Dynamic:
1191 case CK_ToUnion:
1192 case CK_BaseToDerived:
1193 case CK_AddressSpaceConversion:
1194 case CK_ObjCObjectLValueCast:
1195 case CK_VectorSplat:
1196 case CK_ConstructorConversion:
1197 case CK_UserDefinedConversion:
1198 case CK_CPointerToObjCPointerCast:
1199 case CK_BlockPointerToObjCPointerCast:
1200 case CK_LValueToRValue: {
1201 cgm.errorNYI(e->getSourceRange(),
1202 std::string("emitCastLValue for unhandled cast kind: ") +
1203 e->getCastKindName());
1204
1205 return {};
1206 }
1207
1208 case CK_LValueBitCast: {
1209 // This must be a reinterpret_cast (or c-style equivalent).
1210 const auto *ce = cast<ExplicitCastExpr>(e);
1211
1212 cgm.emitExplicitCastExprType(ce, this);
1213 LValue LV = emitLValue(e->getSubExpr());
1215 builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
1216
1217 return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
1218 }
1219
1220 case CK_NoOp: {
1221 // CK_NoOp can model a qualification conversion, which can remove an array
1222 // bound and change the IR type.
1223 LValue lv = emitLValue(e->getSubExpr());
1224 // Propagate the volatile qualifier to LValue, if exists in e.
1226 cgm.errorNYI(e->getSourceRange(),
1227 "emitCastLValue: NoOp changes volatile qual");
1228 if (lv.isSimple()) {
1229 Address v = lv.getAddress();
1230 if (v.isValid()) {
1231 mlir::Type ty = convertTypeForMem(e->getType());
1232 if (v.getElementType() != ty)
1233 cgm.errorNYI(e->getSourceRange(),
1234 "emitCastLValue: NoOp needs bitcast");
1235 }
1236 }
1237 return lv;
1238 }
1239
1240 case CK_UncheckedDerivedToBase:
1241 case CK_DerivedToBase: {
1242 auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();
1243
1244 LValue lv = emitLValue(e->getSubExpr());
1245 Address thisAddr = lv.getAddress();
1246
1247 // Perform the derived-to-base conversion
1248 Address baseAddr =
1249 getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
1250 /*NullCheckValue=*/false, e->getExprLoc());
1251
1252 // TODO: Support accesses to members of base classes in TBAA. For now, we
1253 // conservatively pretend that the complete object is of the base class
1254 // type.
1256 return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
1257 }
1258
1259 case CK_ZeroToOCLOpaqueType:
1260 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
1261 }
1262
1263 llvm_unreachable("Invalid cast kind");
1264}
1265
1266static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf,
1267 const MemberExpr *me) {
1268 if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
1269 // Try to emit static variable member expressions as DREs.
1270 return DeclRefExpr::Create(
1272 /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
1273 me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
1274 }
1275 return nullptr;
1276}
1277
1278LValue CIRGenFunction::emitMemberExpr(const MemberExpr *e) {
1279 if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
1281 return emitDeclRefLValue(dre);
1282 }
1283
1284 Expr *baseExpr = e->getBase();
1285 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
1286 LValue baseLV;
1287 if (e->isArrow()) {
1288 LValueBaseInfo baseInfo;
1290 Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
1291 QualType ptrTy = baseExpr->getType()->getPointeeType();
1293 baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
1294 } else {
1296 baseLV = emitLValue(baseExpr);
1297 }
1298
1299 const NamedDecl *nd = e->getMemberDecl();
1300 if (auto *field = dyn_cast<FieldDecl>(nd)) {
1301 LValue lv = emitLValueForField(baseLV, field);
1303 if (getLangOpts().OpenMP) {
1304 // If the member was explicitly marked as nontemporal, mark it as
1305 // nontemporal. If the base lvalue is marked as nontemporal, mark access
1306 // to children as nontemporal too.
1307 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
1308 }
1309 return lv;
1310 }
1311
1312 if (isa<FunctionDecl>(nd)) {
1313 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: FunctionDecl");
1314 return LValue();
1315 }
1316
1317 llvm_unreachable("Unhandled member declaration!");
1318}
1319
1320/// Evaluate an expression into a given memory location.
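// Editorial note: callers use this to evaluate an initializer directly into a
// destination slot; for example, the compound-literal and reference-temporary
// paths later in this file pass the freshly created temporary's Address as
// 'location'.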
1321void CIRGenFunction::emitAnyExprToMem(const Expr *e, Address location,
1322 Qualifiers quals, bool isInit) {
1323 // FIXME: This function should take an LValue as an argument.
1324 switch (getEvaluationKind(e->getType())) {
1325 case cir::TEK_Complex: {
1326 LValue lv = makeAddrLValue(location, e->getType());
1327 emitComplexExprIntoLValue(e, lv, isInit);
1328 return;
1329 }
1330
1331 case cir::TEK_Aggregate: {
1332 emitAggExpr(e, AggValueSlot::forAddr(location, quals,
1336 return;
1337 }
1338
1339 case cir::TEK_Scalar: {
1341 LValue lv = makeAddrLValue(location, e->getType());
1342 emitStoreThroughLValue(rv, lv);
1343 return;
1344 }
1345 }
1346
1347 llvm_unreachable("bad evaluation kind");
1348}
1349
1350static Address createReferenceTemporary(CIRGenFunction &cgf,
1351 const MaterializeTemporaryExpr *m,
1352 const Expr *inner) {
1353 // TODO(cir): cgf.getTargetHooks();
1354 switch (m->getStorageDuration()) {
1355 case SD_FullExpression:
1356 case SD_Automatic: {
1357 QualType ty = inner->getType();
1358
1360
1361 // The temporary memory should be created in the same scope as the extending
1362 // declaration of the temporary materialization expression.
1363 cir::AllocaOp extDeclAlloca;
1364 if (const ValueDecl *extDecl = m->getExtendingDecl()) {
1365 auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
1366 if (extDeclAddrIter != cgf.localDeclMap.end())
1367 extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
1368 }
1369 mlir::OpBuilder::InsertPoint ip;
1370 if (extDeclAlloca)
1371 ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
1372 return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
1373 cgf.getCounterRefTmpAsString(), /*alloca=*/nullptr,
1374 ip);
1375 }
1376 case SD_Thread:
1377 case SD_Static: {
1378 cgf.cgm.errorNYI(
1379 m->getSourceRange(),
1380 "createReferenceTemporary: static/thread storage duration");
1381 return Address::invalid();
1382 }
1383
1384 case SD_Dynamic:
1385 llvm_unreachable("temporary can't have dynamic storage duration");
1386 }
1387 llvm_unreachable("unknown storage duration");
1388}
1389
1390static void pushTemporaryCleanup(CIRGenFunction &cgf,
1391 const MaterializeTemporaryExpr *m,
1392 const Expr *e, Address referenceTemporary) {
1393 // Objective-C++ ARC:
1394 // If we are binding a reference to a temporary that has ownership, we
1395 // need to perform retain/release operations on the temporary.
1396 //
1397 // FIXME(ogcg): This should be looking at e, not m.
1398 if (m->getType().getObjCLifetime()) {
1399 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
1400 return;
1401 }
1402
1404 if (dk == QualType::DK_none)
1405 return;
1406
1407 switch (m->getStorageDuration()) {
1408 case SD_Static:
1409 case SD_Thread: {
1410 CXXDestructorDecl *referenceTemporaryDtor = nullptr;
1411 if (const auto *classDecl =
1413 classDecl && !classDecl->hasTrivialDestructor())
1414 // Get the destructor for the reference temporary.
1415 referenceTemporaryDtor = classDecl->getDestructor();
1416
1417 if (!referenceTemporaryDtor)
1418 return;
1419
1420 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
1421 "storage duration with destructors");
1422 break;
1423 }
1424
1425 case SD_FullExpression:
1426 cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
1428 break;
1429
1430 case SD_Automatic:
1431 cgf.cgm.errorNYI(e->getSourceRange(),
1432 "pushTemporaryCleanup: automatic storage duration");
1433 break;
1434
1435 case SD_Dynamic:
1436 llvm_unreachable("temporary cannot have dynamic storage duration");
1437 }
1438}
1439
1440LValue CIRGenFunction::emitMaterializeTemporaryExpr(
1441 const MaterializeTemporaryExpr *m) {
1442 const Expr *e = m->getSubExpr();
1443
1444 assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
1445 !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
1446 "Reference should never be pseudo-strong!");
1447
1448 // FIXME: Ideally this would use emitAnyExprToMem; however, we cannot do so,
1449 // as that would cause the lifetime adjustment to be lost for ARC.
1450 auto ownership = m->getType().getObjCLifetime();
1451 if (ownership != Qualifiers::OCL_None &&
1452 ownership != Qualifiers::OCL_ExplicitNone) {
1453 cgm.errorNYI(e->getSourceRange(),
1454 "emitMaterializeTemporaryExpr: ObjCLifetime");
1455 return {};
1456 }
1457
1460 e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);
1461
1462 for (const Expr *ignored : commaLHSs)
1463 emitIgnoredExpr(ignored);
1464
1465 if (isa<OpaqueValueExpr>(e)) {
1466 cgm.errorNYI(e->getSourceRange(),
1467 "emitMaterializeTemporaryExpr: OpaqueValueExpr");
1468 return {};
1469 }
1470
1471 // Create and initialize the reference temporary.
1472 Address object = createReferenceTemporary(*this, m, e);
1473
1474 if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
1475 // TODO(cir): add something akin to stripPointerCasts() to ptr above
1476 cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
1477 return {};
1478 } else {
1480 emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
1481 }
1482 pushTemporaryCleanup(*this, m, e, object);
1483
1484 // Perform derived-to-base casts and/or field accesses, to get from the
1485 // temporary object we created (and, potentially, for which we extended
1486 // the lifetime) to the subobject we're binding the reference to.
1487 if (!adjustments.empty()) {
1488 cgm.errorNYI(e->getSourceRange(),
1489 "emitMaterializeTemporaryExpr: Adjustments");
1490 return {};
1491 }
1492
1493 return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
1494}
1495
1496LValue
1497CIRGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
1499
1500 auto it = opaqueLValues.find(e);
1501 if (it != opaqueLValues.end())
1502 return it->second;
1503
1504 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
1505 return emitLValue(e->getSourceExpr());
1506}
1507
1508RValue
1509CIRGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
1511
1512 auto it = opaqueRValues.find(e);
1513 if (it != opaqueRValues.end())
1514 return it->second;
1515
1516 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
1517 return emitAnyExpr(e->getSourceExpr());
1518}
1519
1520LValue CIRGenFunction::emitCompoundLiteralLValue(const CompoundLiteralExpr *e) {
1521 if (e->isFileScope()) {
1522 cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
1523 return {};
1524 }
1525
1526 if (e->getType()->isVariablyModifiedType()) {
1527 cgm.errorNYI(e->getSourceRange(),
1528 "emitCompoundLiteralLValue: VariablyModifiedType");
1529 return {};
1530 }
1531
1532 Address declPtr = createMemTemp(e->getType(), getLoc(e->getSourceRange()),
1533 ".compoundliteral");
1534 const Expr *initExpr = e->getInitializer();
1535 LValue result = makeAddrLValue(declPtr, e->getType(), AlignmentSource::Decl);
1536
1537 emitAnyExprToMem(initExpr, declPtr, e->getType().getQualifiers(),
1538 /*Init*/ true);
1539
1540 // Block-scope compound literals are destroyed at the end of the enclosing
1541 // scope in C.
1542 if (!getLangOpts().CPlusPlus && e->getType().isDestructedType()) {
1543 cgm.errorNYI(e->getSourceRange(),
1544 "emitCompoundLiteralLValue: non C++ DestructedType");
1545 return {};
1546 }
1547
1548 return result;
1549}
1550
1551LValue CIRGenFunction::emitCallExprLValue(const CallExpr *e) {
1552 RValue rv = emitCallExpr(e);
1553
1554 if (!rv.isScalar()) {
1555 cgm.errorNYI(e->getSourceRange(), "emitCallExprLValue: non-scalar return");
1556 return {};
1557 }
1558
1559 assert(e->getCallReturnType(getContext())->isReferenceType() &&
1560 "Can't have a scalar return unless the return type is a "
1561 "reference type!");
1562
1563 return makeNaturalAlignPointeeAddrLValue(rv.getValue(), e->getType());
1564}
1565
1566LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
1567 // Comma expressions just emit their LHS then their RHS as an l-value.
1568 if (e->getOpcode() == BO_Comma) {
1569 emitIgnoredExpr(e->getLHS());
1570 return emitLValue(e->getRHS());
1571 }
1572
1573 if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) {
1574 cgm.errorNYI(e->getSourceRange(), "member pointers");
1575 return {};
1576 }
1577
1578 assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");
1579
1580 // Note that in all of these cases, __block variables need the RHS
1581 // evaluated first just in case the variable gets moved by the RHS.
1582
1583 switch (getEvaluationKind(e->getType())) {
1584 case cir::TEK_Scalar: {
1586 if (e->getLHS()->getType().getObjCLifetime() !=
1587 clang::Qualifiers::ObjCLifetime::OCL_None) {
1588 cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
1589 return {};
1590 }
1591
1592 RValue rv = emitAnyExpr(e->getRHS());
1593 LValue lv = emitLValue(e->getLHS());
1594
1595 SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
1596 if (lv.isBitField())
1598 else
1599 emitStoreThroughLValue(rv, lv);
1600
1601 if (getLangOpts().OpenMP) {
1602 cgm.errorNYI(e->getSourceRange(), "openmp");
1603 return {};
1604 }
1605
1606 return lv;
1607 }
1608
1609 case cir::TEK_Complex: {
1610 return emitComplexAssignmentLValue(e);
1611 }
1612
1613 case cir::TEK_Aggregate:
1614 cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
1615 return {};
1616 }
1617 llvm_unreachable("bad evaluation kind");
1618}
1619
1620/// Emit code to compute the specified expression which
1621/// can have any type. The result is returned as an RValue struct.
1622RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot) {
1623 switch (getEvaluationKind(e->getType())) {
1624 case cir::TEK_Scalar:
1625 return RValue::get(emitScalarExpr(e));
1626 case cir::TEK_Complex:
1627 return RValue::getComplex(emitComplexExpr(e));
1628 case cir::TEK_Aggregate: {
1629 if (aggSlot.isIgnored())
1630 aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
1632 emitAggExpr(e, aggSlot);
1633 return aggSlot.asRValue();
1634 }
1635 }
1636 llvm_unreachable("bad evaluation kind");
1637}
1638
1639// Detect the unusual situation where an inline version is shadowed by a
1640// non-inline version. In that case we should pick the external one
1641// everywhere. That's GCC behavior too.
1642static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd) {
1643 for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
1644 if (!pd->isInlineBuiltinDeclaration())
1645 return false;
1646 return true;
1647}
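// Editorial example (illustrative): a glibc-style inline builtin such as
//   extern inline __attribute__((always_inline, gnu_inline)) void *
//   memcpy(void *d, const void *s, unsigned long n) { /* ... */ }
// makes every redeclaration of memcpy an inline builtin declaration, so the
// helper above returns true; a later plain (non-inline) declaration would make
// it return false, and the external definition would be used everywhere.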
1648
1649CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
1650 const auto *fd = cast<FunctionDecl>(gd.getDecl());
1651
1652 if (unsigned builtinID = fd->getBuiltinID()) {
1653 if (fd->getAttr<AsmLabelAttr>()) {
1654 cgm.errorNYI("AsmLabelAttr");
1655 }
1656
1657 StringRef ident = fd->getName();
1658 std::string fdInlineName = (ident + ".inline").str();
1659
1660 bool isPredefinedLibFunction =
1661 cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
1662 // Assume nobuiltins everywhere until we actually read the attributes.
1663 bool hasAttributeNoBuiltin = true;
1665
1666 // When directly calling an inline builtin, call it through its mangled
1667 // name to make it clear it's not the actual builtin.
1668 auto fn = cast<cir::FuncOp>(curFn);
1669 if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
1670 cgm.errorNYI("Inline only builtin function calls");
1671 }
1672
1673 // Replaceable builtins provide their own implementation of a builtin. If we
1674 // are in an inline builtin implementation, avoid trivial infinite
1675 // recursion. Honor __attribute__((no_builtin("foo"))) or
1676 // __attribute__((no_builtin)) on the current function unless foo is
1677 // not a predefined library function, in which case we must generate the
1678 // builtin no matter what.
1679 else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
1680 return CIRGenCallee::forBuiltin(builtinID, fd);
1681 }
1682
1683 cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);
1684
1685 assert(!cir::MissingFeatures::hip());
1686
1687 return CIRGenCallee::forDirect(callee, gd);
1688}
1689
1690RValue CIRGenFunction::getUndefRValue(QualType ty) {
1691 if (ty->isVoidType())
1692 return RValue::get(nullptr);
1693
1694 cgm.errorNYI("unsupported type for undef rvalue");
1695 return RValue::get(nullptr);
1696}
1697
1698RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
1699 const CIRGenCallee &origCallee,
1700 const clang::CallExpr *e,
1701 ReturnValueSlot returnValue) {
1702 // Get the actual function type. The callee type will always be a pointer to
1703 // function type or a block pointer type.
1704 assert(calleeTy->isFunctionPointerType() &&
1705 "Callee must have function pointer type!");
1706
1707 calleeTy = getContext().getCanonicalType(calleeTy);
1708 auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
1709
1710 CIRGenCallee callee = origCallee;
1711
1712 if (getLangOpts().CPlusPlus)
1714
1715 const auto *fnType = cast<FunctionType>(pointeeTy);
1716
1718
1719 CallArgList args;
1721
1722 emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
1723 e->getDirectCallee());
1724
1725 const CIRGenFunctionInfo &funcInfo =
1726 cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
1727
1728 // C99 6.5.2.2p6:
1729 // If the expression that denotes the called function has a type that does
1730 // not include a prototype, [the default argument promotions are performed].
1731 // If the number of arguments does not equal the number of parameters, the
1732 // behavior is undefined. If the function is defined with a type that
1733 // includes a prototype, and either the prototype ends with an ellipsis (,
1734 // ...) or the types of the arguments after promotion are not compatible
1735 // with the types of the parameters, the behavior is undefined. If the
1736 // function is defined with a type that does not include a prototype, and
1737 // the types of the arguments after promotion are not compatible with those
1738 // of the parameters after promotion, the behavior is undefined [except in
1739 // some trivial cases].
1740 // That is, in the general case, we should assume that a call through an
1741 // unprototyped function type works like a *non-variadic* call. The way we
1742 // make this work is to cast to the exact type of the promoted arguments.
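// Editorial example (illustrative): for
//   void f();        // unprototyped in C before C23
//   f(1, 2.0);
// the call is emitted against the promoted argument types (int, double) as a
// non-variadic call, and the callee pointer is bitcast to that exact function
// type below.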
1743 if (isa<FunctionNoProtoType>(fnType)) {
1746 cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
1747 // get non-variadic function type
1748 calleeTy = cir::FuncType::get(calleeTy.getInputs(),
1749 calleeTy.getReturnType(), false);
1750 auto calleePtrTy = cir::PointerType::get(calleeTy);
1751
1752 mlir::Operation *fn = callee.getFunctionPointer();
1753 mlir::Value addr;
1754 if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
1755 addr = builder.create<cir::GetGlobalOp>(
1756 getLoc(e->getSourceRange()),
1757 cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
1758 } else {
1759 addr = fn->getResult(0);
1760 }
1761
1762 fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
1763 callee.setFunctionPointer(fn);
1764 }
1765
1767 assert(!cir::MissingFeatures::hip());
1769
1770 cir::CIRCallOpInterface callOp;
1771 RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
1772 getLoc(e->getExprLoc()));
1773
1775
1776 return callResult;
1777}
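// A minimal C sketch (C only; the exact promoted function type is an
// assumption) of the unprototyped-call case handled above:
//
//   void legacy();          // K&R-style declaration, no prototype
//
//   void call_legacy(void) {
//     legacy(1, 2.0f);      // default promotions: the int stays int, the
//   }                       // float becomes double; the callee is then
//                           // bitcast to a pointer to the exact non-variadic
//                           // promoted type, roughly "void (int, double)"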
1778
1780 e = e->IgnoreParens();
1781
1782 // Look through function-to-pointer decay.
1783 if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
1784 if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
1785 implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
1786 return emitCallee(implicitCast->getSubExpr());
1787 }
1788 // When performing an indirect call through a function pointer lvalue, the
1789 // function pointer lvalue is implicitly converted to an rvalue through an
1790 // lvalue-to-rvalue conversion.
1791 assert(implicitCast->getCastKind() == CK_LValueToRValue &&
1792 "unexpected implicit cast on function pointers");
1793 } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
1794 // Resolve direct calls.
1795 const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
1796 return emitDirectCallee(funcDecl);
1797 } else if (isa<MemberExpr>(e)) {
1798 cgm.errorNYI(e->getSourceRange(),
1799 "emitCallee: call to member function is NYI");
1800 return {};
1801 } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
1803 }
1804
1805 // Otherwise, we have an indirect reference.
1806 mlir::Value calleePtr;
1808 if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
1809 calleePtr = emitScalarExpr(e);
1810 functionType = ptrType->getPointeeType();
1811 } else {
1812 functionType = e->getType();
1813 calleePtr = emitLValue(e).getPointer();
1814 }
1815 assert(functionType->isFunctionType());
1816
1817 GlobalDecl gd;
1818 if (const auto *vd =
1819 dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
1820 gd = GlobalDecl(vd);
1821
1822 CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
1823 CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
1824 return callee;
1825}
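// A small C sketch of the callee forms distinguished above:
//
//   int add(int a, int b);
//
//   int drive(int (*fp)(int, int)) {
//     int x = add(1, 2);   // FunctionToPointerDecay over a DeclRefExpr:
//                          // resolved through emitDirectCallee
//     int y = fp(3, 4);    // LValueToRValue load of a function pointer:
//                          // handled as an indirect reference (emitScalarExpr)
//     return x + y;
//   }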
1826
1830
1831 if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
1833
1834 if (isa<CUDAKernelCallExpr>(e)) {
1835 cgm.errorNYI(e->getSourceRange(), "call to CUDA kernel");
1836 return RValue::get(nullptr);
1837 }
1838
1839 if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
1840 // If the callee decl is a CXXMethodDecl, we need to emit this as a C++
1841 // operator member call.
1842 if (const CXXMethodDecl *md =
1843 dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
1844 return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
1845 // A CXXOperatorCallExpr is created even for explicit object methods, but
1846 // these should be treated like static function calls. Fall through to do
1847 // that.
1848 }
1849
1850 CIRGenCallee callee = emitCallee(e->getCallee());
1851
1852 if (callee.isBuiltin())
1853 return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
1854 returnValue);
1855
1856 if (callee.isPseudoDestructor())
1858
1859 return emitCall(e->getCallee()->getType(), callee, e, returnValue);
1860}
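// A C++ sketch of the dispatch performed above; whether each case is fully
// supported yet varies, and some still hit errorNYI:
//
//   struct Str {
//     Str &operator+=(int);         // CXXOperatorCallExpr with a CXXMethodDecl
//   };                              // callee: emitCXXOperatorMemberCallExpr
//
//   int plain(int);
//
//   void drive(Str &s) {
//     s += 1;
//     (void)plain(2);               // ordinary call: emitCallee + emitCall
//     (void)__builtin_expect(1, 1); // builtin callee: emitBuiltinExpr
//   }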
1861
1862/// Emit code to compute the specified expression, ignoring the result.
1864 if (e->isPRValue()) {
1866 emitAnyExpr(e);
1867 return;
1868 }
1869
1870 // Just emit it as an l-value and drop the result.
1871 emitLValue(e);
1872}
1873
1875 LValueBaseInfo *baseInfo) {
1877 assert(e->getType()->isArrayType() &&
1878 "Array to pointer decay must have array source type!");
1879
1880 // Expressions of array type can't be bitfields or vector elements.
1881 LValue lv = emitLValue(e);
1882 Address addr = lv.getAddress();
1883
1884 // If the array type was an incomplete type, we need to make sure
1885 // the decay ends up being the right type.
1886 auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
1887
1888 if (e->getType()->isVariableArrayType())
1889 return addr;
1890
1891 [[maybe_unused]] auto pointeeTy =
1892 mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
1893
1894 [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
1895 assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
1896 assert(pointeeTy == arrayTy);
1897
1898 // The result of this decay conversion points to an array element within the
1899 // base lvalue. However, since TBAA currently does not support representing
1900 // accesses to elements of member arrays, we conservatively represent accesses
1901 // to the pointee object as if it had no base lvalue specified.
1902 // TODO: Support TBAA for member arrays.
1905
1906 mlir::Value ptr = builder.maybeBuildArrayDecay(
1907 cgm.getLoc(e->getSourceRange()), addr.getPointer(),
1908 convertTypeForMem(eltType));
1909 return Address(ptr, addr.getAlignment());
1910}
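// A C sketch of the decay handled above:
//
//   float sum(const float *p, int n);
//
//   float total(void) {
//     float a[8] = {0};
//     return sum(a, 8);   // ArrayToPointerDecay: the lvalue for 'a' has a
//   }                     // pointer-to-array type; the decay produces a
//                         // pointer to the element type at 'a's alignment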
1911
1912/// Given the address of a temporary variable, produce an r-value of its type.
1916 switch (getEvaluationKind(type)) {
1917 case cir::TEK_Complex:
1918 cgm.errorNYI(loc, "convertTempToRValue: complex type");
1919 return RValue::get(nullptr);
1920 case cir::TEK_Aggregate:
1921 cgm.errorNYI(loc, "convertTempToRValue: aggregate type");
1922 return RValue::get(nullptr);
1923 case cir::TEK_Scalar:
1924 return RValue::get(emitLoadOfScalar(lvalue, loc));
1925 }
1926 llvm_unreachable("bad evaluation kind");
1927}
1928
1929/// Emit an `if` on a boolean condition, filling `then` and `else` into
1930 /// the appropriate regions.
1931mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
1932 const Stmt *thenS,
1933 const Stmt *elseS) {
1934 mlir::Location thenLoc = getLoc(thenS->getSourceRange());
1935 std::optional<mlir::Location> elseLoc;
1936 if (elseS)
1937 elseLoc = getLoc(elseS->getSourceRange());
1938
1939 mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
1941 cond, /*thenBuilder=*/
1942 [&](mlir::OpBuilder &, mlir::Location) {
1943 LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
1944 resThen = emitStmt(thenS, /*useCurrentScope=*/true);
1945 },
1946 thenLoc,
1947 /*elseBuilder=*/
1948 [&](mlir::OpBuilder &, mlir::Location) {
1949 assert(elseLoc && "Invalid location for elseS.");
1950 LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
1951 resElse = emitStmt(elseS, /*useCurrentScope=*/true);
1952 },
1953 elseLoc);
1954
1955 return mlir::LogicalResult::success(resThen.succeeded() &&
1956 resElse.succeeded());
1957}
1958
1959/// Emit an `if` on a boolean condition, filling `then` and `else` into
1960 /// the appropriate regions.
1962 const clang::Expr *cond, BuilderCallbackRef thenBuilder,
1963 mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
1964 std::optional<mlir::Location> elseLoc) {
1965 // Attempt to be as accurate as possible with IfOp location, generate
1966 // one fused location that has either 2 or 4 total locations, depending
1967 // on else's availability.
1968 SmallVector<mlir::Location, 2> ifLocs{thenLoc};
1969 if (elseLoc)
1970 ifLocs.push_back(*elseLoc);
1971 mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);
1972
1973 // Emit the code with the fully general case.
1974 mlir::Value condV = emitOpOnBoolExpr(loc, cond);
1975 return builder.create<cir::IfOp>(loc, condV, elseLoc.has_value(),
1976 /*thenBuilder=*/thenBuilder,
1977 /*elseBuilder=*/elseBuilder);
1978}
1979
1980 /// TODO(cir): see EmitBranchOnBoolExpr for extra ideas.
1981mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
1982 const Expr *cond) {
1985 cond = cond->IgnoreParens();
1986
1987 // In LLVM the condition is reversed here for efficient codegen.
1988 // This should be done in CIR prior to LLVM lowering; if we do it now,
1989 // we can make CIR-based diagnostics misleading.
1990 // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
1992
1993 if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
1994 Expr *trueExpr = condOp->getTrueExpr();
1995 Expr *falseExpr = condOp->getFalseExpr();
1996 mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());
1997
1998 mlir::Value ternaryOpRes =
1999 builder
2000 .create<cir::TernaryOp>(
2001 loc, condV, /*thenBuilder=*/
2002 [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
2003 mlir::Value lhs = emitScalarExpr(trueExpr);
2004 b.create<cir::YieldOp>(loc, lhs);
2005 },
2006 /*elseBuilder=*/
2007 [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
2008 mlir::Value rhs = emitScalarExpr(falseExpr);
2009 b.create<cir::YieldOp>(loc, rhs);
2010 })
2011 .getResult();
2012
2013 return emitScalarConversion(ternaryOpRes, condOp->getType(),
2014 getContext().BoolTy, condOp->getExprLoc());
2015 }
2016
2017 if (isa<CXXThrowExpr>(cond)) {
2018 cgm.errorNYI("NYI");
2019 return createDummyValue(loc, cond->getType());
2020 }
2021
2022 // If the branch has a condition wrapped by __builtin_unpredictable,
2023 // create metadata that specifies that the branch is unpredictable.
2024 // Don't bother if not optimizing because that metadata would not be used.
2026
2027 // Emit the code with the fully general case.
2028 return evaluateExprAsBool(cond);
2029}
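// A C sketch of the ConditionalOperator case special-cased above:
//
//   int pick(int c, int a, int b) {
//     if (c ? a : b)    // the condition is a ConditionalOperator, so it is
//       return 1;       // emitted as a cir.ternary whose scalar result is
//     return 0;         // then converted to bool for use as the condition
//   }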
2030
2031mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2032 mlir::Location loc, CharUnits alignment,
2033 bool insertIntoFnEntryBlock,
2034 mlir::Value arraySize) {
2035 mlir::Block *entryBlock = insertIntoFnEntryBlock
2037 : curLexScope->getEntryBlock();
2038
2039 // If this is an alloca in the entry basic block of a cir.try and there's
2040 // a surrounding cir.scope, make sure the alloca ends up in the surrounding
2041 // scope instead. This is necessary in order to guarantee all SSA values are
2042 // reachable during cleanups.
2043 assert(!cir::MissingFeatures::tryOp());
2044
2045 return emitAlloca(name, ty, loc, alignment,
2046 builder.getBestAllocaInsertPoint(entryBlock), arraySize);
2047}
2048
2049mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2050 mlir::Location loc, CharUnits alignment,
2051 mlir::OpBuilder::InsertPoint ip,
2052 mlir::Value arraySize) {
2053 // CIR uses its own alloca address space rather than following the target
2054 // data layout like the original CodeGen does. The data layout awareness
2055 // should be done in the lowering pass instead.
2057 cir::PointerType localVarPtrTy = builder.getPointerTo(ty);
2058 mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);
2059
2060 mlir::Value addr;
2061 {
2062 mlir::OpBuilder::InsertionGuard guard(builder);
2063 builder.restoreInsertionPoint(ip);
2064 addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
2065 /*var type*/ ty, name, alignIntAttr);
2067 }
2068 return addr;
2069}
2070
2071 // Note: this function also emits constructor calls to support an MSVC extension
2072 // allowing explicit constructor function calls.
2075 const Expr *callee = ce->getCallee()->IgnoreParens();
2076
2077 if (isa<BinaryOperator>(callee)) {
2078 cgm.errorNYI(ce->getSourceRange(),
2079 "emitCXXMemberCallExpr: C++ binary operator");
2080 return RValue::get(nullptr);
2081 }
2082
2083 const auto *me = cast<MemberExpr>(callee);
2084 const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());
2085
2086 if (md->isStatic()) {
2087 cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
2088 return RValue::get(nullptr);
2089 }
2090
2091 bool hasQualifier = me->hasQualifier();
2092 NestedNameSpecifier qualifier = me->getQualifier();
2093 bool isArrow = me->isArrow();
2094 const Expr *base = me->getBase();
2095
2097 ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
2098}
2099
2101 AggValueSlot dest) {
2102 assert(!dest.isIgnored() && "Must have a destination!");
2103 const CXXConstructorDecl *cd = e->getConstructor();
2104
2105 // If we require zero initialization before (or instead of) calling the
2106 // constructor, as can be the case with a non-user-provided default
2107 // constructor, emit the zero initialization now, unless destination is
2108 // already zeroed.
2109 if (e->requiresZeroInitialization() && !dest.isZeroed()) {
2110 switch (e->getConstructionKind()) {
2114 e->getType());
2115 break;
2118 cgm.errorNYI(e->getSourceRange(),
2119 "emitCXXConstructExpr: base requires initialization");
2120 break;
2121 }
2122 }
2123
2124 // If this is a call to a trivial default constructor, do nothing.
2125 if (cd->isTrivial() && cd->isDefaultConstructor())
2126 return;
2127
2128 // Elide the constructor if we're constructing from a temporary
2129 if (getLangOpts().ElideConstructors && e->isElidable()) {
2130 // FIXME: This only handles the simplest case, where the source object is
2131 // passed directly as the first argument to the constructor. This
2132 // should also handle stepping through implicit casts and conversion
2133 // sequences which involve two steps, with a conversion operator
2134 // followed by a converting constructor.
2135 const Expr *srcObj = e->getArg(0);
2136 assert(srcObj->isTemporaryObject(getContext(), cd->getParent()));
2137 assert(
2138 getContext().hasSameUnqualifiedType(e->getType(), srcObj->getType()));
2139 emitAggExpr(srcObj, dest);
2140 return;
2141 }
2142
2143 if (const ArrayType *arrayType = getContext().getAsArrayType(e->getType())) {
2145 emitCXXAggrConstructorCall(cd, arrayType, dest.getAddress(), e, false);
2146 } else {
2147
2149 bool forVirtualBase = false;
2150 bool delegating = false;
2151
2152 switch (e->getConstructionKind()) {
2155 break;
2157 // We should be emitting a constructor; GlobalDecl will assert this
2158 type = curGD.getCtorType();
2159 delegating = true;
2160 break;
2162 forVirtualBase = true;
2163 [[fallthrough]];
2165 type = Ctor_Base;
2166 break;
2167 }
2168
2169 emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
2170 }
2171}
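// A C++ sketch of the construction kinds routed above (declarations only, so
// it compiles but does not link):
//
//   struct S {
//     S();
//     int x;
//   };
//
//   struct T { int x; };   // trivial default constructor
//
//   void build() {
//     S one;       // complete-object ctor: emitCXXConstructorCall
//     S many[4];   // array: emitCXXAggrConstructorCall loops the ctor
//     T none;      // trivial default ctor: nothing is emitted
//   }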
2172
2174 // Emit the expression as an lvalue.
2175 LValue lv = emitLValue(e);
2176 assert(lv.isSimple());
2177 mlir::Value value = lv.getPointer();
2178
2180
2181 return RValue::get(value);
2182}
2183
2185 LValueBaseInfo *pointeeBaseInfo) {
2186 if (refLVal.isVolatile())
2187 cgm.errorNYI(loc, "load of volatile reference");
2188
2189 cir::LoadOp load =
2190 builder.create<cir::LoadOp>(loc, refLVal.getAddress().getElementType(),
2191 refLVal.getAddress().getPointer());
2192
2194
2195 QualType pointeeType = refLVal.getType()->getPointeeType();
2196 CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
2197 return Address(load, convertTypeForMem(pointeeType), align);
2198}
2199
2201 mlir::Location loc,
2202 QualType refTy,
2203 AlignmentSource source) {
2204 LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
2205 LValueBaseInfo pointeeBaseInfo;
2207 Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
2208 return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
2209 pointeeBaseInfo);
2210}
2211
2212void CIRGenFunction::emitTrap(mlir::Location loc, bool createNewBlock) {
2213 cir::TrapOp::create(builder, loc);
2214 if (createNewBlock)
2215 builder.createBlock(builder.getBlock()->getParent());
2216}
2217
2219 bool createNewBlock) {
2221 cir::UnreachableOp::create(builder, getLoc(loc));
2222 if (createNewBlock)
2223 builder.createBlock(builder.getBlock()->getParent());
2224}
2225
2226mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
2227 clang::QualType qt) {
2228 mlir::Type t = convertType(qt);
2229 CharUnits alignment = getContext().getTypeAlignInChars(qt);
2230 return builder.createDummyValue(loc, t, alignment);
2231}
2232
2233//===----------------------------------------------------------------------===//
2234// CIR builder helpers
2235//===----------------------------------------------------------------------===//
2236
2238 const Twine &name, Address *alloca,
2239 mlir::OpBuilder::InsertPoint ip) {
2240 // FIXME: Should we prefer the preferred type alignment here?
2241 return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
2242 alloca, ip);
2243}
2244
2246 mlir::Location loc, const Twine &name,
2247 Address *alloca,
2248 mlir::OpBuilder::InsertPoint ip) {
2249 Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
2250 /*ArraySize=*/nullptr, alloca, ip);
2251 if (ty->isConstantMatrixType()) {
2253 cgm.errorNYI(loc, "temporary matrix value");
2254 }
2255 return result;
2256}
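// A hypothetical usage sketch (the variable names are invented) from inside a
// CIRGenFunction member, materializing an expression into a fresh temporary
// with the helper defined above:
//
//   Address tmp = createMemTemp(e->getType(), getLoc(e->getSourceRange()),
//                               "ref.tmp");
//   emitAnyExprToMem(e, tmp, e->getType().getQualifiers(),
//                    /*isInitializer=*/true);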
2257
2258 /// This creates an alloca and inserts it into the entry block of the
2259/// current region.
2261 mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
2262 mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
2263 cir::AllocaOp alloca = ip.isSet()
2264 ? createTempAlloca(ty, loc, name, ip, arraySize)
2265 : createTempAlloca(ty, loc, name, arraySize);
2266 alloca.setAlignmentAttr(cgm.getSize(align));
2267 return Address(alloca, ty, align);
2268}
2269
2270 /// This creates an alloca and inserts it into the entry block. The alloca is
2271 /// cast to the default address space if necessary.
2273 mlir::Location loc, const Twine &name,
2274 mlir::Value arraySize,
2275 Address *allocaAddr,
2276 mlir::OpBuilder::InsertPoint ip) {
2277 Address alloca =
2278 createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
2279 if (allocaAddr)
2280 *allocaAddr = alloca;
2281 mlir::Value v = alloca.getPointer();
2282 // Alloca always returns a pointer in the alloca address space, which may
2283 // be different from the address space expected by the language. For example,
2284 // in C++ the auto variables are in the default address space. Therefore,
2285 // cast the alloca to the default address space when necessary.
2287 return Address(v, ty, align);
2288}
2289
2290/// This creates an alloca and inserts it into the entry block if \p ArraySize
2291/// is nullptr, otherwise inserts it at the current insertion point of the
2292/// builder.
2293cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2294 mlir::Location loc,
2295 const Twine &name,
2296 mlir::Value arraySize,
2297 bool insertIntoFnEntryBlock) {
2298 return mlir::cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
2299 insertIntoFnEntryBlock, arraySize)
2300 .getDefiningOp());
2301}
2302
2303 /// This creates an alloca and inserts it at the provided insertion point.
2304cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2305 mlir::Location loc,
2306 const Twine &name,
2307 mlir::OpBuilder::InsertPoint ip,
2308 mlir::Value arraySize) {
2309 assert(ip.isSet() && "Insertion point is not set");
2310 return mlir::cast<cir::AllocaOp>(
2311 emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
2312 .getDefiningOp());
2313}
2314
2315/// Try to emit a reference to the given value without producing it as
2316 /// an l-value. In many cases this is just an optimization, but it also avoids
2317 /// us having to emit global copies of variables if they're named without
2318/// triggering a formal use in a context where we can't emit a direct
2319/// reference to them, for instance if a block or lambda or a member of a
2320/// local class uses a const int variable or constexpr variable from an
2321/// enclosing function.
2322///
2323/// For named members of enums, this is the only way they are emitted.
2326 const ValueDecl *value = refExpr->getDecl();
2327
2328 // There is a lot more to do here, but for now only EnumConstantDecl is
2329 // supported.
2331
2332 // The value needs to be an enum constant or a constant variable.
2333 if (!isa<EnumConstantDecl>(value))
2334 return ConstantEmission();
2335
2336 Expr::EvalResult result;
2337 if (!refExpr->EvaluateAsRValue(result, getContext()))
2338 return ConstantEmission();
2339
2340 QualType resultType = refExpr->getType();
2341
2342 // As long as we're only handling EnumConstantDecl, there should be no
2343 // side-effects.
2344 assert(!result.HasSideEffects);
2345
2346 // Emit as a constant.
2347 // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
2348 // somewhat heavy refactoring...)
2349 mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
2350 refExpr->getLocation(), result.Val, resultType);
2351 mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
2352 assert(cstToEmit && "expected a typed attribute");
2353
2355
2356 return ConstantEmission::forValue(cstToEmit);
2357}
2358
2362 return tryEmitAsConstant(dre);
2363 return ConstantEmission();
2364}
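// A C sketch of the only case currently folded above:
//
//   enum Color { Red, Green, Blue };
//
//   int channel(void) {
//     return Blue;   // DeclRefExpr to an EnumConstantDecl: folded by
//   }                // tryEmitAsConstant and emitted as the constant 2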
2365
2367 const CIRGenFunction::ConstantEmission &constant, Expr *e) {
2368 assert(constant && "not a constant");
2369 if (constant.isReference()) {
2370 cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
2371 return {};
2372 }
2373 return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
2374}
2375
2376/// An LValue is a candidate for having its loads and stores be made atomic if
2377/// we are operating under /volatile:ms *and* the LValue itself is volatile and
2378 /// such an operation can be performed without a libcall.
2380 if (!cgm.getLangOpts().MSVolatile)
2381 return false;
2382
2383 cgm.errorNYI("LValueSuitableForInlineAtomic LangOpts MSVolatile");
2384 return false;
2385}