clang 23.0.0git
CIRGenExpr.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "Address.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/CharUnits.h"
22#include "clang/AST/Decl.h"
23#include "clang/AST/Expr.h"
24#include "clang/AST/ExprCXX.h"
31#include <optional>
32
33using namespace clang;
34using namespace clang::CIRGen;
35using namespace cir;
36
37/// Get the address of a zero-sized field within a record. The resulting address
38/// doesn't necessarily have the right type.
// NOTE(review): the signature line is absent from this listing; the visible
// parameters are (base, field, fieldName, fieldIndex) — confirm against the
// full source.
40 const FieldDecl *field,
41 llvm::StringRef fieldName,
42 unsigned fieldIndex) {
// Zero-sized fields are not supported yet; report and bail out.
43 if (field->isZeroSize(getContext())) {
44 cgm.errorNYI(field->getSourceRange(),
45 "emitAddrOfFieldStorage: zero-sized field");
46 return Address::invalid();
47 }
48
49 mlir::Location loc = getLoc(field->getLocation());
50
51 mlir::Type fieldType = convertType(field->getType());
52 auto fieldPtr = cir::PointerType::get(fieldType);
53 // For most cases fieldName is the same as field->getName() but for lambdas,
54 // which do not currently carry the name, so it can be passed down from the
55 // CaptureStmt.
56 cir::GetMemberOp memberAddr = builder.createGetMember(
57 loc, fieldPtr, base.getPointer(), fieldName, fieldIndex);
58
59 // Retrieve layout information, compute alignment and return the final
60 // address.
61 const RecordDecl *rec = field->getParent();
62 const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
63 unsigned idx = layout.getCIRFieldNo(field);
// The alignment of the member is the base alignment adjusted by the member's
// byte offset within the CIR record layout.
65 layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
66 return Address(memberAddr, base.getAlignment().alignmentAtOffset(offset));
67}
68
69/// Given an expression of pointer type, try to
70/// derive a more accurate bound on the alignment of the pointer.
// The optional \p baseInfo out-parameter receives the alignment source of the
// resulting address when non-null.
72 LValueBaseInfo *baseInfo) {
73 // We allow this with ObjC object pointers because of fragile ABIs.
74 assert(expr->getType()->isPointerType() ||
75 expr->getType()->isObjCObjectPointerType());
76 expr = expr->IgnoreParens();
77
78 // Casts:
79 if (auto const *ce = dyn_cast<CastExpr>(expr)) {
80 if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
81 cgm.emitExplicitCastExprType(ece);
82
83 switch (ce->getCastKind()) {
84 // Non-converting casts (but not C's implicit conversion from void*).
85 case CK_BitCast:
86 case CK_NoOp:
87 case CK_AddressSpaceConversion: {
88 if (const auto *ptrTy =
89 ce->getSubExpr()->getType()->getAs<PointerType>()) {
90 if (ptrTy->getPointeeType()->isVoidType())
91 break;
92
// Recurse into the cast operand; its alignment info becomes ours unless
// an explicit cast to a more-aligned type overrides it below.
93 LValueBaseInfo innerBaseInfo;
95 Address addr =
96 emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
97 if (baseInfo)
98 *baseInfo = innerBaseInfo;
99
100 if (isa<ExplicitCastExpr>(ce)) {
101 LValueBaseInfo targetTypeBaseInfo;
102
103 const QualType pointeeType = expr->getType()->getPointeeType();
104 const CharUnits align =
105 cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);
106
107 // If the source l-value is opaque, honor the alignment of the
108 // casted-to type.
109 if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
110 if (baseInfo)
111 baseInfo->mergeForCast(targetTypeBaseInfo);
112 addr = Address(addr.getPointer(), addr.getElementType(), align);
113 }
114 }
115
117
// Rewrite the element type of the address to match the cast's result
// pointee type.
118 const mlir::Type eltTy =
119 convertTypeForMem(expr->getType()->getPointeeType());
120 addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
121 addr, eltTy);
123
124 return addr;
125 }
126 break;
127 }
128
129 // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
130 case CK_ArrayToPointerDecay:
131 return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);
132
133 case CK_UncheckedDerivedToBase:
134 case CK_DerivedToBase: {
137 Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
138 const CXXRecordDecl *derived =
139 ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
140 return getAddressOfBaseClass(addr, derived, ce->path(),
142 ce->getExprLoc());
143 }
144
145 case CK_AnyPointerToBlockPointerCast:
146 case CK_BaseToDerived:
147 case CK_BaseToDerivedMemberPointer:
148 case CK_BlockPointerToObjCPointerCast:
149 case CK_BuiltinFnToFnPtr:
150 case CK_CPointerToObjCPointerCast:
151 case CK_DerivedToBaseMemberPointer:
152 case CK_Dynamic:
153 case CK_FunctionToPointerDecay:
154 case CK_IntegralToPointer:
155 case CK_LValueToRValue:
156 case CK_LValueToRValueBitCast:
157 case CK_NullToMemberPointer:
158 case CK_NullToPointer:
159 case CK_ReinterpretMemberPointer:
160 // Common pointer conversions, nothing to do here.
161 // TODO: Is there any reason to treat base-to-derived conversions
162 // specially?
163 break;
164
// All remaining cast kinds cannot produce a pointer value here, so reaching
// one of them indicates a caller bug.
165 case CK_ARCConsumeObject:
166 case CK_ARCExtendBlockObject:
167 case CK_ARCProduceObject:
168 case CK_ARCReclaimReturnedObject:
169 case CK_AtomicToNonAtomic:
170 case CK_BooleanToSignedIntegral:
171 case CK_ConstructorConversion:
172 case CK_CopyAndAutoreleaseBlockObject:
173 case CK_Dependent:
174 case CK_FixedPointCast:
175 case CK_FixedPointToBoolean:
176 case CK_FixedPointToFloating:
177 case CK_FixedPointToIntegral:
178 case CK_FloatingCast:
179 case CK_FloatingComplexCast:
180 case CK_FloatingComplexToBoolean:
181 case CK_FloatingComplexToIntegralComplex:
182 case CK_FloatingComplexToReal:
183 case CK_FloatingRealToComplex:
184 case CK_FloatingToBoolean:
185 case CK_FloatingToFixedPoint:
186 case CK_FloatingToIntegral:
187 case CK_HLSLAggregateSplatCast:
188 case CK_HLSLArrayRValue:
189 case CK_HLSLElementwiseCast:
190 case CK_HLSLVectorTruncation:
191 case CK_HLSLMatrixTruncation:
192 case CK_IntToOCLSampler:
193 case CK_IntegralCast:
194 case CK_IntegralComplexCast:
195 case CK_IntegralComplexToBoolean:
196 case CK_IntegralComplexToFloatingComplex:
197 case CK_IntegralComplexToReal:
198 case CK_IntegralRealToComplex:
199 case CK_IntegralToBoolean:
200 case CK_IntegralToFixedPoint:
201 case CK_IntegralToFloating:
202 case CK_LValueBitCast:
203 case CK_MatrixCast:
204 case CK_MemberPointerToBoolean:
205 case CK_NonAtomicToAtomic:
206 case CK_ObjCObjectLValueCast:
207 case CK_PointerToBoolean:
208 case CK_PointerToIntegral:
209 case CK_ToUnion:
210 case CK_ToVoid:
211 case CK_UserDefinedConversion:
212 case CK_VectorSplat:
213 case CK_ZeroToOCLOpaqueType:
214 llvm_unreachable("unexpected cast for emitPointerWithAlignment");
215 }
216 }
217
218 // Unary &
219 if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
220 // TODO(cir): maybe we should use cir.unary for pointers here instead.
221 if (uo->getOpcode() == UO_AddrOf) {
// &expr: the pointer's alignment is exactly the l-value's alignment.
222 LValue lv = emitLValue(uo->getSubExpr());
223 if (baseInfo)
224 *baseInfo = lv.getBaseInfo();
226 return lv.getAddress();
227 }
228 }
229
230 // std::addressof and variants.
231 if (auto const *call = dyn_cast<CallExpr>(expr)) {
232 switch (call->getBuiltinCallee()) {
233 default:
234 break;
235 case Builtin::BIaddressof:
236 case Builtin::BI__addressof:
237 case Builtin::BI__builtin_addressof: {
238 cgm.errorNYI(expr->getSourceRange(),
239 "emitPointerWithAlignment: builtin addressof");
240 return Address::invalid();
241 }
242 }
243 }
244
245 // Otherwise, use the alignment of the type.
247 emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
248 /*forPointeeType=*/true, baseInfo);
249}
250
// Store the rvalue \p src through the destination l-value \p dst, dispatching
// on the l-value kind (vector element, bitfield, or simple address).
252 bool isInit) {
253 if (!dst.isSimple()) {
254 if (dst.isVectorElt()) {
255 // Read/modify/write the vector, inserting the new element
256 const mlir::Location loc = dst.getVectorPointer().getLoc();
257 const mlir::Value vector =
258 builder.createLoad(loc, dst.getVectorAddress());
259 const mlir::Value newVector = cir::VecInsertOp::create(
260 builder, loc, vector, src.getValue(), dst.getVectorIdx())
261 builder.createStore(loc, newVector, dst.getVectorAddress());
262 return;
263 }
264
265 assert(dst.isBitField() && "Unknown LValue type");
// NOTE(review): the bitfield-store call that should precede this `return`
// is not visible here, which makes the errorNYI below look unreachable —
// verify against the complete source.
267 return;
268
269 cgm.errorNYI(dst.getPointer().getLoc(),
270 "emitStoreThroughLValue: non-simple lvalue");
271 return;
272 }
273
275
276 assert(src.isScalar() && "Can't emit an aggregate store with this method");
277 emitStoreOfScalar(src.getValue(), dst, isInit);
278}
279
// Build an l-value that refers to the global variable \p vd referenced by
// expression \p e. Unsupported configurations (dynamic TLS, OpenMP declare
// target, reference-typed globals) are reported via errorNYI.
280static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
281 const VarDecl *vd) {
282 QualType t = e->getType();
283
284 // If it's thread_local, emit a call to its wrapper function instead.
285 if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
286 cgf.cgm.errorNYI(e->getSourceRange(),
287 "emitGlobalVarDeclLValue: thread_local variable");
288
289 // Check if the variable is marked as declare target with link clause in
290 // device codegen.
291 if (cgf.getLangOpts().OpenMP)
292 cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");
293
294 // Traditional LLVM codegen handles thread local separately, CIR handles
295 // as part of getAddrOfGlobalVar.
296 mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);
297
// The global's symbol type may differ from the in-memory type of the decl;
// bitcast the address when they disagree.
299 mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
300 cir::PointerType realPtrTy = cgf.getBuilder().getPointerTo(realVarTy);
301 if (realPtrTy != v.getType())
302 v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);
303
304 CharUnits alignment = cgf.getContext().getDeclAlign(vd);
305 Address addr(v, realVarTy, alignment);
306 LValue lv;
307 if (vd->getType()->isReferenceType())
308 cgf.cgm.errorNYI(e->getSourceRange(),
309 "emitGlobalVarDeclLValue: reference type");
310 else
311 lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
313 return lv;
314}
315
// Store the scalar \p value to \p addr. Handles the atomic-store path,
// tags the destination alloca as initialized when storing a declared
// variable's initializer, and reports NYI for boolean/size-3 vectors and
// nontemporal stores.
316void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
317 bool isVolatile, QualType ty,
318 LValueBaseInfo baseInfo, bool isInit,
319 bool isNontemporal) {
320
321 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
322 // Boolean vectors use `iN` as storage type.
323 if (clangVecTy->isExtVectorBoolType())
324 cgm.errorNYI(addr.getPointer().getLoc(),
325 "emitStoreOfScalar ExtVectorBoolType");
326
327 // Handle vectors of size 3 like size 4 for better performance.
328 const mlir::Type elementType = addr.getElementType();
329 const auto vecTy = cast<cir::VectorType>(elementType);
330
331 // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it upstreamed
333 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
334 cgm.errorNYI(addr.getPointer().getLoc(),
335 "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
336 }
337
// Convert to the memory representation before storing.
338 value = emitToMemory(value, ty);
339
341 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
342 if (ty->isAtomicType() ||
343 (!isInit && isLValueSuitableForInlineAtomic(atomicLValue))) {
344 emitAtomicStore(RValue::get(value), atomicLValue, isInit);
345 return;
346 }
347
348 // Update the alloca with more info on initialization.
349 assert(addr.getPointer() && "expected pointer to exist");
350 auto srcAlloca = addr.getDefiningOp<cir::AllocaOp>();
351 if (currVarDecl && srcAlloca) {
352 const VarDecl *vd = currVarDecl;
353 assert(vd && "VarDecl expected");
354 if (vd->hasInit())
355 srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
356 }
357
358 assert(currSrcLoc && "must pass in source location");
359 builder.createStore(*currSrcLoc, value, addr, isVolatile);
360
361 if (isNontemporal) {
362 cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
363 return;
364 }
365
367}
368
369// TODO: Replace this with a proper TargetInfo function call.
370/// Helper method to check if the underlying ABI is AAPCS
371static bool isAAPCS(const TargetInfo &targetInfo) {
372 return targetInfo.getABI().starts_with("aapcs");
373}
374
// Store the scalar rvalue \p src into the bitfield l-value \p dst and return
// the value that was stored (result of createSetBitfield).
376 LValue dst) {
377
378 const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
379 mlir::Type resLTy = convertTypeForMem(dst.getType());
380 Address ptr = dst.getBitFieldAddress();
381
// NOTE(review): "useVoaltile" is a misspelling of "useVolatile"; it is a
// local-only name, so renaming it is purely cosmetic.
// AAPCS volatile-bitfield semantics apply only when the option is on, the
// destination is volatile, and the target ABI is AAPCS.
382 bool useVoaltile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
383 dst.isVolatileQualified() &&
384 info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());
385
386 mlir::Value dstAddr = dst.getAddress().getPointer();
387
388 return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr,
389 ptr.getElementType(), src.getValue(), info,
390 dst.isVolatileQualified(), useVoaltile);
391}
392
// Load the value of the bitfield l-value \p lv as an rvalue via
// createGetBitfield.
394 const CIRGenBitFieldInfo &info = lv.getBitFieldInfo();
395
396 // Get the output type.
397 mlir::Type resLTy = convertType(lv.getType());
398 Address ptr = lv.getBitFieldAddress();
399
// NOTE(review): "useVoaltile" is a misspelling of "useVolatile" (local name).
400 bool useVoaltile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
401 isAAPCS(cgm.getTarget());
402
403 mlir::Value field =
404 builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
405 info, lv.isVolatile(), useVoaltile);
407 return RValue::get(field);
408}
409
// Compute the address of the storage unit backing a bitfield. For unions the
// member index is the field's own index; otherwise the caller-provided CIR
// field index is used.
411 const FieldDecl *field,
412 mlir::Type fieldType,
413 unsigned index) {
414 mlir::Location loc = getLoc(field->getLocation());
415 cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
417 cir::GetMemberOp sea = getBuilder().createGetMember(
418 loc, fieldPtr, base.getPointer(), field->getName(),
419 rec.isUnion() ? field->getFieldIndex() : index);
// Alignment is the base alignment adjusted by the element's byte offset.
421 rec.getElementOffset(cgm.getDataLayout().layout, index));
422 return Address(sea, base.getAlignment().alignmentAtOffset(offset));
423}
424
// Build a bitfield l-value for \p field within the record l-value \p base,
// using the record layout's CIRGenBitFieldInfo.
426 const FieldDecl *field) {
427 LValueBaseInfo baseInfo = base.getBaseInfo();
428 const CIRGenRecordLayout &layout =
429 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
430 const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
431
433
434 unsigned idx = layout.getCIRFieldNo(field);
435 Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);
436
// Normalize the address to the bitfield's storage type when they differ.
437 mlir::Location loc = getLoc(field->getLocation());
438 if (addr.getElementType() != info.storageType)
439 addr = builder.createElementBitCast(loc, addr, info.storageType);
440
441 QualType fieldType =
443 // TODO(cir): Support TBAA for bit fields.
445 LValueBaseInfo fieldBaseInfo(baseInfo.getAlignmentSource());
446 return LValue::makeBitfield(addr, info, fieldType, fieldBaseInfo);
447}
448
// Build an l-value for member \p field of the record l-value \p base.
// Bitfields are delegated to emitLValueForBitField; reference members are
// loaded through immediately so the result refers to the referencee.
450 LValueBaseInfo baseInfo = base.getBaseInfo();
451
452 if (field->isBitField())
453 return emitLValueForBitField(base, field);
454
455 QualType fieldType = field->getType();
456 const RecordDecl *rec = field->getParent();
457 AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
458 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));
460
461 Address addr = base.getAddress();
462 if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
463 if (cgm.getCodeGenOpts().StrictVTablePointers &&
464 classDecl->isDynamicClass()) {
465 cgm.errorNYI(field->getSourceRange(),
466 "emitLValueForField: strict vtable for dynamic class");
467 }
468 }
469
470 unsigned recordCVR = base.getVRQualifiers();
471
// Lambdas' capture fields carry no name; the name is looked up from the
// module's side table instead of the FieldDecl.
472 llvm::StringRef fieldName = field->getName();
473 unsigned fieldIndex;
474 if (cgm.lambdaFieldToName.count(field))
475 fieldName = cgm.lambdaFieldToName[field];
476
477 if (rec->isUnion())
478 fieldIndex = field->getFieldIndex();
479 else {
480 const CIRGenRecordLayout &layout =
481 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
482 fieldIndex = layout.getCIRFieldNo(field);
483 }
484
485 addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);
487
488 // If this is a reference field, load the reference right now.
489 if (fieldType->isReferenceType()) {
491 LValue refLVal = makeAddrLValue(addr, fieldType, fieldBaseInfo);
492 if (recordCVR & Qualifiers::Volatile)
493 refLVal.getQuals().addVolatile();
494 addr = emitLoadOfReference(refLVal, getLoc(field->getSourceRange()),
495 &fieldBaseInfo);
496
497 // Qualifiers on the struct don't apply to the referencee.
498 recordCVR = 0;
499 fieldType = fieldType->getPointeeType();
500 }
501
502 if (field->hasAttr<AnnotateAttr>()) {
503 cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
504 return LValue();
505 }
506
507 LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
508 lv.getQuals().addCVRQualifiers(recordCVR);
509
510 // __weak attribute on a field is ignored.
512 cgm.errorNYI(field->getSourceRange(),
513 "emitLValueForField: __weak attribute");
514 return LValue();
515 }
516
517 return lv;
518}
519
// Like emitLValueForField, but used while initializing the field: reference
// members yield the address of the reference slot itself (cast to the
// reference's in-memory type) instead of loading through it.
521 LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName) {
522 QualType fieldType = field->getType();
523
524 if (!fieldType->isReferenceType())
525 return emitLValueForField(base, field);
526
527 const CIRGenRecordLayout &layout =
528 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
529 unsigned fieldIndex = layout.getCIRFieldNo(field);
530
531 Address v =
532 emitAddrOfFieldStorage(base.getAddress(), field, fieldName, fieldIndex);
533
534 // Make sure that the address is pointing to the right type.
535 mlir::Type memTy = convertTypeForMem(fieldType);
536 v = builder.createElementBitCast(getLoc(field->getSourceRange()), v, memTy);
537
538 // TODO: Generate TBAA information that describes this access as a structure
539 // member access and not just an access to an object of the field's type. This
540 // should be similar to what we do in EmitLValueForField().
541 LValueBaseInfo baseInfo = base.getBaseInfo();
542 AlignmentSource fieldAlignSource = baseInfo.getAlignmentSource();
543 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(fieldAlignSource));
545 return makeAddrLValue(v, fieldType, fieldBaseInfo);
546}
547
548/// Converts a scalar value from its primary IR type (as returned
549/// by ConvertType) to its load/store type.
550mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
551 if (auto *atomicTy = ty->getAs<AtomicType>())
552 ty = atomicTy->getValueType();
553
554 if (ty->isExtVectorBoolType()) {
555 cgm.errorNYI("emitToMemory: extVectorBoolType");
556 }
557
558 // Unlike in classic codegen CIR, bools are kept as `cir.bool` and BitInts are
559 // kept as `cir.int<N>` until further lowering
560
561 return value;
562}
563
// Converts a scalar from its load/store type back to its primary IR type;
// currently an identity transform apart from an NYI diagnostic.
564mlir::Value CIRGenFunction::emitFromMemory(mlir::Value value, QualType ty) {
565 if (auto *atomicTy = ty->getAs<AtomicType>())
566 ty = atomicTy->getValueType();
567
// NOTE(review): the guard condition for this NYI block (presumably a
// packed-bool-vector type check) is not visible here — confirm against the
// complete source.
569 cgm.errorNYI("emitFromMemory: PackedVectorBoolType");
570 }
571
572 return value;
573}
574
575void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
576 bool isInit) {
577 if (lvalue.getType()->isConstantMatrixType()) {
578 assert(0 && "NYI: emitStoreOfScalar constant matrix type");
579 return;
580 }
581
582 emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
583 lvalue.getType(), lvalue.getBaseInfo(), isInit,
584 /*isNontemporal=*/false);
585}
586
// Load a scalar of type \p ty from \p addr, returning the loaded value.
// Atomic loads, void element types, boolean-representation types, and
// boolean/size-3 vectors are reported as NYI.
587mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
588 QualType ty, SourceLocation loc,
589 LValueBaseInfo baseInfo) {
590 // Traditional LLVM codegen handles thread local separately, CIR handles
591 // as part of getAddrOfGlobalVar (GetGlobalOp).
592 mlir::Type eltTy = addr.getElementType();
593
594 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
595 if (clangVecTy->isExtVectorBoolType()) {
596 cgm.errorNYI(loc, "emitLoadOfScalar: ExtVectorBoolType");
597 return nullptr;
598 }
599
600 const auto vecTy = cast<cir::VectorType>(eltTy);
601
602 // Handle vectors of size 3 like size 4 for better performance.
604 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
605 cgm.errorNYI(addr.getPointer().getLoc(),
606 "emitLoadOfScalar Vec3 & PreserveVec3Type disabled");
607 }
608
610 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
611 if (ty->isAtomicType() || isLValueSuitableForInlineAtomic(atomicLValue))
612 cgm.errorNYI("emitLoadOfScalar: load atomic");
613
614 if (mlir::isa<cir::VoidType>(eltTy))
615 cgm.errorNYI(loc, "emitLoadOfScalar: void type");
616
618
619 mlir::Value loadOp = builder.createLoad(getLoc(loc), addr, isVolatile);
620 if (!ty->isBooleanType() && ty->hasBooleanRepresentation())
621 cgm.errorNYI("emitLoadOfScalar: boolean type with boolean representation");
622
623 return loadOp;
624}
625
// Convenience overload: load a scalar through an LValue by forwarding its
// address, volatility, type, and base info to the Address-based overload.
627 SourceLocation loc) {
630 return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
631 lvalue.getType(), loc, lvalue.getBaseInfo());
632}
633
634/// Given an expression that represents a value lvalue, this
635/// method emits the address of the lvalue, then loads the result as an rvalue,
636/// returning the rvalue.
638 assert(!lv.getType()->isFunctionType());
639 assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");
640
641 if (lv.isBitField())
642 return emitLoadOfBitfieldLValue(lv, loc);
643
644 if (lv.isSimple())
645 return RValue::get(emitLoadOfScalar(lv, loc));
646
// Vector-element l-values: load the whole vector and extract the element.
647 if (lv.isVectorElt()) {
648 const mlir::Value load =
649 builder.createLoad(getLoc(loc), lv.getVectorAddress());
650 return RValue::get(cir::VecExtractOp::create(builder, getLoc(loc), load,
651 lv.getVectorIdx()));
652 }
653
654 if (lv.isExtVectorElt())
656
657 cgm.errorNYI(loc, "emitLoadOfLValue");
658 return RValue::get(nullptr);
659}
660
661int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx,
662 const mlir::ArrayAttr elts) {
663 auto elt = mlir::cast<mlir::IntegerAttr>(elts[idx]);
664 return elt.getInt();
665}
666
667// If this is a reference to a subset of the elements of a vector, create an
668// appropriate shufflevector.
670 mlir::Location loc = lv.getExtVectorPointer().getLoc();
671 mlir::Value vec = builder.createLoad(loc, lv.getExtVectorAddress());
672
673 // HLSL allows treating scalars as one-element vectors. Converting the scalar
674 // IR value to a vector here allows the rest of codegen to behave as normal.
675 if (getLangOpts().HLSL && !mlir::isa<cir::VectorType>(vec.getType())) {
676 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: HLSL");
677 return {};
678 }
679
680 const mlir::ArrayAttr elts = lv.getExtVectorElts();
681
682 // If the result of the expression is a non-vector type, we must be extracting
683 // a single element. Just codegen as an extractelement.
684 const auto *exprVecTy = lv.getType()->getAs<clang::VectorType>();
685 if (!exprVecTy) {
686 int64_t indexValue = getAccessedFieldNo(0, elts);
687 cir::ConstantOp index =
688 builder.getConstInt(loc, builder.getSInt64Ty(), indexValue);
689 return RValue::get(cir::VecExtractOp::create(builder, loc, vec, index));
690 }
691
692 // Always use shuffle vector to try to retain the original program structure
// Build the shuffle mask from the accessed element indices.
694 for (auto i : llvm::seq<unsigned>(0, exprVecTy->getNumElements()))
695 mask.push_back(getAccessedFieldNo(i, elts));
696
697 cir::VecShuffleOp resultVec = builder.createVecShuffle(loc, vec, mask);
698 if (lv.getType()->isExtVectorBoolType()) {
699 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: ExtVectorBoolType");
700 return {};
701 }
702
703 return RValue::get(resultVec);
704}
705
706LValue
// Emit an l-value for a pointer-to-data-member binary expression
// (`obj .* ptr` / `objptr ->* ptr`).
708 assert((e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) &&
709 "unexpected binary operator opcode");
710
// For `.*` the LHS is an l-value; for `->*` it is a pointer expression.
711 Address baseAddr = Address::invalid();
712 if (e->getOpcode() == BO_PtrMemD)
713 baseAddr = emitLValue(e->getLHS()).getAddress();
714 else
715 baseAddr = emitPointerWithAlignment(e->getLHS());
716
717 const auto *memberPtrTy = e->getRHS()->getType()->castAs<MemberPointerType>();
718
719 mlir::Value memberPtr = emitScalarExpr(e->getRHS());
720
721 LValueBaseInfo baseInfo;
723 Address memberAddr = emitCXXMemberDataPointerAddress(e, baseAddr, memberPtr,
724 memberPtrTy, &baseInfo);
725
726 return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo);
727}
728
729/// Generates lvalue for partial ext_vector access.
// Returns the address of the first accessed element of the ext-vector,
// viewed with the vector's element type.
731 mlir::Location loc) {
732 Address vectorAddress = lv.getExtVectorAddress();
733 QualType elementTy = lv.getType()->castAs<VectorType>()->getElementType();
734 mlir::Type vectorElementTy = cgm.getTypes().convertType(elementTy);
735 Address castToPointerElement =
736 vectorAddress.withElementType(builder, vectorElementTy);
737
738 mlir::ArrayAttr extVecElts = lv.getExtVectorElts();
739 unsigned idx = getAccessedFieldNo(0, extVecElts);
740 mlir::Value idxValue =
741 builder.getConstInt(loc, mlir::cast<cir::IntType>(ptrDiffTy), idx);
742
743 mlir::Value elementValue = builder.getArrayElement(
744 loc, loc, castToPointerElement.getPointer(), vectorElementTy, idxValue,
745 /*shouldDecay=*/false);
746
// Alignment is adjusted by the element's byte offset within the vector.
747 const CharUnits eltSize = getContext().getTypeSizeInChars(elementTy);
748 const CharUnits alignment =
749 castToPointerElement.getAlignment().alignmentAtOffset(idx * eltSize);
750 return Address(elementValue, vectorElementTy, alignment);
751}
752
// Return the CIR function for global decl \p gd via the module's
// getAddrOfFunction.
753static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
755 return cgm.getAddrOfFunction(gd);
756}
757
// Forward a captured lambda field access to emitLValueForLambdaField.
759 mlir::Value thisValue) {
760 return cgf.emitLValueForLambdaField(fd, thisValue);
761}
762
763/// Given that we are currently emitting a lambda, emit an l-value for
764/// one of its members.
765///
767 mlir::Value thisValue) {
768 bool hasExplicitObjectParameter = false;
769 const auto *methD = dyn_cast_if_present<CXXMethodDecl>(curCodeDecl);
770 LValue lambdaLV;
771 if (methD) {
772 hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction();
773 assert(methD->getParent()->isLambda());
774 assert(methD->getParent() == field->getParent());
775 }
// C++23 explicit-object ("deducing this") lambdas are not supported yet.
776 if (hasExplicitObjectParameter) {
777 cgm.errorNYI(field->getSourceRange(), "ExplicitObjectMemberFunction");
778 } else {
// Otherwise build an l-value for the lambda object from `this`.
779 QualType lambdaTagType =
781 lambdaLV = makeNaturalAlignAddrLValue(thisValue, lambdaTagType);
782 }
783 return emitLValueForField(lambdaLV, field);
784}
785
789
// Build an l-value referring to the function \p gd as referenced by
// expression \p e, bitcasting the symbol address when the emitted function
// type differs from the declared type (e.g. incomplete prototypes).
790static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
791 GlobalDecl gd) {
792 const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
793 cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
794 mlir::Location loc = cgf.getLoc(e->getSourceRange());
795 CharUnits align = cgf.getContext().getDeclAlign(fd);
796
798
799 mlir::Type fnTy = funcOp.getFunctionType();
800 mlir::Type ptrTy = cir::PointerType::get(fnTy);
801 mlir::Value addr = cir::GetGlobalOp::create(cgf.getBuilder(), loc, ptrTy,
802 funcOp.getSymName());
803
804 if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
805 fnTy = cgf.convertType(fd->getType());
806 ptrTy = cir::PointerType::get(fnTy);
807
808 addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
809 cir::CastKind::bitcast, addr);
810 }
811
812 return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
814}
815
816/// Determine whether we can emit a reference to \p vd from the current
817/// context, despite not necessarily having seen an odr-use of the variable in
818/// this context.
819/// TODO(cir): This could be shared with classic codegen.
821 const DeclRefExpr *e,
822 const VarDecl *vd) {
823 // For a variable declared in an enclosing scope, do not emit a spurious
824 // reference even if we have a capture, as that will emit an unwarranted
825 // reference to our capture state, and will likely generate worse code than
826 // emitting a local copy.
828 return false;
829
830 // For a local declaration declared in this function, we can always reference
831 // it even if we don't have an odr-use.
832 if (vd->hasLocalStorage()) {
833 return vd->getDeclContext() ==
834 dyn_cast_or_null<DeclContext>(cgf.curCodeDecl);
835 }
836
837 // For a global declaration, we can emit a reference to it if we know
838 // for sure that we are able to emit a definition of it.
839 vd = vd->getDefinition(cgf.getContext());
840 if (!vd)
841 return false;
842
843 // Don't emit a spurious reference if it might be to a variable that only
844 // exists on a different device / target.
845 // FIXME: This is unnecessarily broad. Check whether this would actually be a
846 // cross-target reference.
847 if (cgf.getLangOpts().OpenMP || cgf.getLangOpts().CUDA ||
848 cgf.getLangOpts().OpenCL) {
849 return false;
850 }
851
852 // We can emit a spurious reference only if the linkage implies that we'll
853 // be emitting a non-interposable symbol that will be retained until link
854 // time.
855 switch (cgf.cgm.getCIRLinkageVarDefinition(vd, /*IsConstant=*/false)) {
856 case cir::GlobalLinkageKind::ExternalLinkage:
857 case cir::GlobalLinkageKind::LinkOnceODRLinkage:
858 case cir::GlobalLinkageKind::WeakODRLinkage:
859 case cir::GlobalLinkageKind::InternalLinkage:
860 case cir::GlobalLinkageKind::PrivateLinkage:
861 return true;
862 default:
863 return false;
864 }
865}
866
// Emit an l-value for a DeclRefExpr: dispatches on the referenced decl —
// variables (captured, global, or local), structured-binding decls, and
// function decls. Unsupported cases are reported via errorNYI.
868 const NamedDecl *nd = e->getDecl();
869 QualType ty = e->getType();
870
871 assert(e->isNonOdrUse() != NOUR_Unevaluated &&
872 "should not emit an unevaluated operand");
873
874 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
875 // Global Named registers access via intrinsics only
876 if (vd->getStorageClass() == SC_Register && vd->hasAttr<AsmLabelAttr>() &&
877 !vd->isLocalVarDecl()) {
878 cgm.errorNYI(e->getSourceRange(),
879 "emitDeclRefLValue: Global Named registers access");
880 return LValue();
881 }
882
883 if (e->isNonOdrUse() == NOUR_Constant &&
884 (vd->getType()->isReferenceType() ||
885 !canEmitSpuriousReferenceToVariable(*this, e, vd))) {
886 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: NonOdrUse");
887 return LValue();
888 }
889
890 // Check for captured variables.
892 vd = vd->getCanonicalDecl();
893 if (FieldDecl *fd = lambdaCaptureFields.lookup(vd))
894 return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
897 }
898 }
899
900 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
901 // Checks for omitted feature handling
908
909 // Check if this is a global variable
910 if (vd->hasLinkage() || vd->isStaticDataMember())
911 return emitGlobalVarDeclLValue(*this, e, vd);
912
913 Address addr = Address::invalid();
914
915 // The variable should generally be present in the local decl map.
916 auto iter = localDeclMap.find(vd);
917 if (iter != localDeclMap.end()) {
918 addr = iter->second;
919 } else {
920 // Otherwise, it might be static local we haven't emitted yet for some
921 // reason; most likely, because it's in an outer function.
922 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: static local");
923 }
924
925 // Drill into reference types.
926 LValue lv =
927 vd->getType()->isReferenceType()
931
932 // Statics are defined as globals, so they are not include in the function's
933 // symbol table.
934 assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
935 "non-static locals should be already mapped");
936
937 return lv;
938 }
939
// Structured bindings: emit the underlying binding expression.
940 if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
943 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
944 return LValue();
945 }
946 return emitLValue(bd->getBinding());
947 }
948
949 if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
950 LValue lv = emitFunctionDeclLValue(*this, e, fd);
951
952 // Emit debuginfo for the function declaration if the target wants to.
953 if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
955
956 return lv;
957 }
958
959 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
960 return LValue();
961}
962
// Evaluate expression \p e and convert the result to a boolean (i1-like)
// value. Member pointers are not yet supported; complex types take the
// complex-to-bool conversion path.
964 QualType boolTy = getContext().BoolTy;
965 SourceLocation loc = e->getExprLoc();
966
968 if (e->getType()->getAs<MemberPointerType>()) {
969 cgm.errorNYI(e->getSourceRange(),
970 "evaluateExprAsBool: member pointer type");
971 return createDummyValue(getLoc(loc), boolTy);
972 }
973
975 if (!e->getType()->isAnyComplexType())
976 return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);
977
979 loc);
980}
981
984
985 // __extension__ doesn't affect lvalue-ness.
986 if (op == UO_Extension)
987 return emitLValue(e->getSubExpr());
988
989 switch (op) {
990 case UO_Deref: {
992 assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
993
995 LValueBaseInfo baseInfo;
996 Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);
997
998 // Tag 'load' with deref attribute.
999 // FIXME: This misses some derefence cases and has problematic interactions
1000 // with other operators.
1001 if (auto loadOp = addr.getDefiningOp<cir::LoadOp>())
1002 loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));
1003
1004 LValue lv = makeAddrLValue(addr, t, baseInfo);
1007 return lv;
1008 }
1009 case UO_Real:
1010 case UO_Imag: {
1011 LValue lv = emitLValue(e->getSubExpr());
1012 assert(lv.isSimple() && "real/imag on non-ordinary l-value");
1013
1014 // __real is valid on scalars. This is a faster way of testing that.
1015 // __imag can only produce an rvalue on scalars.
1016 if (e->getOpcode() == UO_Real &&
1017 !mlir::isa<cir::ComplexType>(lv.getAddress().getElementType())) {
1018 assert(e->getSubExpr()->getType()->isArithmeticType());
1019 return lv;
1020 }
1021
1023 QualType elemTy = exprTy->castAs<clang::ComplexType>()->getElementType();
1024 mlir::Location loc = getLoc(e->getExprLoc());
1025 Address component =
1026 e->getOpcode() == UO_Real
1027 ? builder.createComplexRealPtr(loc, lv.getAddress())
1028 : builder.createComplexImagPtr(loc, lv.getAddress());
1030 LValue elemLV = makeAddrLValue(component, elemTy);
1031 elemLV.getQuals().addQualifiers(lv.getQuals());
1032 return elemLV;
1033 }
1034 case UO_PreInc:
1035 case UO_PreDec: {
1036 cir::UnaryOpKind kind =
1037 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
1038 LValue lv = emitLValue(e->getSubExpr());
1039
1040 assert(e->isPrefix() && "Prefix operator in unexpected state!");
1041
1042 if (e->getType()->isAnyComplexType()) {
1043 emitComplexPrePostIncDec(e, lv, kind, /*isPre=*/true);
1044 } else {
1045 emitScalarPrePostIncDec(e, lv, kind, /*isPre=*/true);
1046 }
1047
1048 return lv;
1049 }
1050 case UO_Extension:
1051 llvm_unreachable("UnaryOperator extension should be handled above!");
1052 case UO_Plus:
1053 case UO_Minus:
1054 case UO_Not:
1055 case UO_LNot:
1056 case UO_AddrOf:
1057 case UO_PostInc:
1058 case UO_PostDec:
1059 case UO_Coawait:
1060 llvm_unreachable("UnaryOperator of non-lvalue kind!");
1061 }
1062 llvm_unreachable("Unknown unary operator kind!");
1063}
1064
1065/// If the specified expr is a simple decay from an array to pointer,
1066/// return the array subexpression.
1067/// FIXME: this could be abstracted into a common AST helper.
1068static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
1069 // If this isn't just an array->pointer decay, bail out.
1070 const auto *castExpr = dyn_cast<CastExpr>(e);
1071 if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
1072 return nullptr;
1073
1074 // If this is a decay from variable width array, bail out.
1075 const Expr *subExpr = castExpr->getSubExpr();
1076 if (subExpr->getType()->isVariableArrayType())
1077 return nullptr;
1078
1079 return subExpr;
1080}
1081
1082static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
1083 // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr?
1084 if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
1085 return constantOp.getValueAttr<cir::IntAttr>();
1086 return {};
1087}
1088
1089static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
1090 CharUnits eltSize) {
1091 // If we have a constant index, we can use the exact offset of the
1092 // element we're accessing.
1093 if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
1094 const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
1095 return arrayAlign.alignmentAtOffset(offset);
1096 }
1097 // Otherwise, use the worst-case alignment for any element.
1098 return arrayAlign.alignmentOfArrayElement(eltSize);
1099}
1100
1102 const VariableArrayType *vla) {
1103 QualType eltType;
1104 do {
1105 eltType = vla->getElementType();
1106 } while ((vla = astContext.getAsVariableArrayType(eltType)));
1107 return eltType;
1108}
1109
1111 mlir::Location beginLoc,
1112 mlir::Location endLoc, mlir::Value ptr,
1113 mlir::Type eltTy, mlir::Value idx,
1114 bool shouldDecay) {
1115 CIRGenModule &cgm = cgf.getCIRGenModule();
1116 // TODO(cir): LLVM codegen emits in bound gep check here, is there anything
1117 // that would enhance tracking this later in CIR?
1119 return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
1120 shouldDecay);
1121}
1122
1124 mlir::Location beginLoc,
1125 mlir::Location endLoc, Address addr,
1126 QualType eltType, mlir::Value idx,
1127 mlir::Location loc, bool shouldDecay) {
1128
1129 // Determine the element size of the statically-sized base. This is
1130 // the thing that the indices are expressed in terms of.
1131 if (const VariableArrayType *vla =
1132 cgf.getContext().getAsVariableArrayType(eltType)) {
1133 eltType = getFixedSizeElementType(cgf.getContext(), vla);
1134 }
1135
1136 // We can use that to compute the best alignment of the element.
1137 const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
1138 const CharUnits eltAlign =
1139 getArrayElementAlign(addr.getAlignment(), idx, eltSize);
1140
1142 const mlir::Value eltPtr =
1143 emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
1144 addr.getElementType(), idx, shouldDecay);
1145 const mlir::Type elementType = cgf.convertTypeForMem(eltType);
1146 return Address(eltPtr, elementType, eltAlign);
1147}
1148
1149LValue
1151 if (e->getType()->getAs<ObjCObjectType>()) {
1152 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
1154 }
1155
1156 // The index must always be an integer, which is not an aggregate. Emit it
1157 // in lexical order (this complexity is, sadly, required by C++17).
1158 assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
1159 "index was neither LHS nor RHS");
1160
1161 auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
1162 const mlir::Value idx = emitScalarExpr(e->getIdx());
1163
1164 // Extend or truncate the index type to 32 or 64-bits.
1165 auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
1166 if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
1167 cgm.errorNYI(e->getSourceRange(),
1168 "emitArraySubscriptExpr: index type cast");
1169 return idx;
1170 };
1171
1172 // If the base is a vector type, then we are forming a vector element
1173 // with this subscript.
1174 if (e->getBase()->getType()->isSubscriptableVectorType() &&
1176 const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
1177 const LValue lv = emitLValue(e->getBase());
1178 return LValue::makeVectorElt(lv.getAddress(), idx, e->getBase()->getType(),
1179 lv.getBaseInfo());
1180 }
1181
1182 // The HLSL runtime handles subscript expressions on global resource arrays
1183 // and objects with HLSL buffer layouts.
1184 if (getLangOpts().HLSL) {
1185 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: HLSL");
1186 return {};
1187 }
1188
1189 mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
1190
1191 // Handle the extvector case we ignored above.
1193 const LValue lv = emitLValue(e->getBase());
1194 Address addr = emitExtVectorElementLValue(lv, cgm.getLoc(e->getExprLoc()));
1195
1196 QualType elementType = lv.getType()->castAs<VectorType>()->getElementType();
1197 addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
1198 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
1199 idx, cgm.getLoc(e->getExprLoc()),
1200 /*shouldDecay=*/false);
1201
1202 return makeAddrLValue(addr, elementType, lv.getBaseInfo());
1203 }
1204
1205 if (const VariableArrayType *vla =
1206 getContext().getAsVariableArrayType(e->getType())) {
1207 // The base must be a pointer, which is not an aggregate. Emit
1208 // it. It needs to be emitted first in case it's what captures
1209 // the VLA bounds.
1211
1212 // The element count here is the total number of non-VLA elements.
1213 mlir::Value numElements = getVLASize(vla).numElts;
1214 idx = builder.createIntCast(idx, numElements.getType());
1215
1216 // Effectively, the multiply by the VLA size is part of the GEP.
1217 // GEP indexes are signed, and scaling an index isn't permitted to
1218 // signed-overflow, so we use the same semantics for our explicit
1219 // multiply. We suppress this if overflow is not undefined behavior.
1220 OverflowBehavior overflowBehavior = getLangOpts().PointerOverflowDefined
1223 idx = builder.createMul(cgm.getLoc(e->getExprLoc()), idx, numElements,
1224 overflowBehavior);
1225
1226 addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
1227 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
1228 idx, cgm.getLoc(e->getExprLoc()),
1229 /*shouldDecay=*/false);
1230
1231 return makeAddrLValue(addr, vla->getElementType(), LValueBaseInfo());
1232 }
1233
1234 if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
1235 LValue arrayLV;
1236 if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
1237 arrayLV = emitArraySubscriptExpr(ase);
1238 else
1239 arrayLV = emitLValue(array);
1240
1241 // Propagate the alignment from the array itself to the result.
1242 const Address addr = emitArraySubscriptPtr(
1243 *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
1244 arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1245 /*shouldDecay=*/true);
1246
1247 const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
1248
1249 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1250 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1251 }
1252
1253 return lv;
1254 }
1255
1256 // The base must be a pointer; emit it with an estimate of its alignment.
1257 assert(e->getBase()->getType()->isPointerType() &&
1258 "The base must be a pointer");
1259
1260 LValueBaseInfo eltBaseInfo;
1261 const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
1262 // Propagate the alignment from the array itself to the result.
1263 const Address addxr = emitArraySubscriptPtr(
1264 *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
1265 e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1266 /*shouldDecay=*/false);
1267
1268 const LValue lv = LValue::makeAddr(addxr, e->getType(), eltBaseInfo);
1269
1270 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1271 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1272 }
1273
1274 return lv;
1275}
1276
1278 // Emit the base vector as an l-value.
1279 LValue base;
1280
1281 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
1282 if (e->isArrow()) {
1283 // If it is a pointer to a vector, emit the address and form an lvalue with
1284 // it.
1285 LValueBaseInfo baseInfo;
1286 Address ptr = emitPointerWithAlignment(e->getBase(), &baseInfo);
1287 const auto *clangPtrTy =
1289 base = makeAddrLValue(ptr, clangPtrTy->getPointeeType(), baseInfo);
1290 base.getQuals().removeObjCGCAttr();
1291 } else if (e->getBase()->isGLValue()) {
1292 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
1293 // emit the base as an lvalue.
1294 assert(e->getBase()->getType()->isVectorType());
1295 base = emitLValue(e->getBase());
1296 } else {
1297 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
1298 assert(e->getBase()->getType()->isVectorType() &&
1299 "Result must be a vector");
1300 mlir::Value vec = emitScalarExpr(e->getBase());
1301
1302 // Store the vector to memory (because LValue wants an address).
1303 QualType baseTy = e->getBase()->getType();
1304 Address vecMem = createMemTemp(baseTy, vec.getLoc(), "tmp");
1305 if (!getLangOpts().HLSL && baseTy->isExtVectorBoolType()) {
1306 cgm.errorNYI(e->getSourceRange(),
1307 "emitExtVectorElementExpr: ExtVectorBoolType & !HLSL");
1308 return {};
1309 }
1310 builder.createStore(vec.getLoc(), vec, vecMem);
1311 base = makeAddrLValue(vecMem, baseTy, AlignmentSource::Decl);
1312 }
1313
1314 QualType type =
1316
1317 // Encode the element access list into a vector of unsigned indices.
1319 e->getEncodedElementAccess(indices);
1320
1321 if (base.isSimple()) {
1322 SmallVector<int64_t> attrElts(indices.begin(), indices.end());
1323 mlir::ArrayAttr elts = builder.getI64ArrayAttr(attrElts);
1324 return LValue::makeExtVectorElt(base.getAddress(), elts, type,
1325 base.getBaseInfo());
1326 }
1327
1328 cgm.errorNYI(e->getSourceRange(),
1329 "emitExtVectorElementExpr: isSimple is false");
1330 return {};
1331}
1332
1334 llvm::StringRef name) {
1335 cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e, name);
1336 assert(globalOp.getAlignment() && "expected alignment for string literal");
1337 unsigned align = *(globalOp.getAlignment());
1338 mlir::Value addr =
1339 builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
1340 return makeAddrLValue(
1341 Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
1343}
1344
1345/// Casts are never lvalues unless that cast is to a reference type. If the cast
1346/// is to a reference, we can have the usual lvalue result, otherwise if a cast
1347/// is needed by the code generator in an lvalue context, then it must mean that
1348/// we need the address of an aggregate in order to access one of its members.
1349/// This can happen for all the reasons that casts are permitted with aggregate
1350/// result, including noop aggregate casts, and cast from scalar to union.
1352 switch (e->getCastKind()) {
1353 case CK_ToVoid:
1354 case CK_BitCast:
1355 case CK_LValueToRValueBitCast:
1356 case CK_ArrayToPointerDecay:
1357 case CK_FunctionToPointerDecay:
1358 case CK_NullToMemberPointer:
1359 case CK_NullToPointer:
1360 case CK_IntegralToPointer:
1361 case CK_PointerToIntegral:
1362 case CK_PointerToBoolean:
1363 case CK_IntegralCast:
1364 case CK_BooleanToSignedIntegral:
1365 case CK_IntegralToBoolean:
1366 case CK_IntegralToFloating:
1367 case CK_FloatingToIntegral:
1368 case CK_FloatingToBoolean:
1369 case CK_FloatingCast:
1370 case CK_FloatingRealToComplex:
1371 case CK_FloatingComplexToReal:
1372 case CK_FloatingComplexToBoolean:
1373 case CK_FloatingComplexCast:
1374 case CK_FloatingComplexToIntegralComplex:
1375 case CK_IntegralRealToComplex:
1376 case CK_IntegralComplexToReal:
1377 case CK_IntegralComplexToBoolean:
1378 case CK_IntegralComplexCast:
1379 case CK_IntegralComplexToFloatingComplex:
1380 case CK_DerivedToBaseMemberPointer:
1381 case CK_BaseToDerivedMemberPointer:
1382 case CK_MemberPointerToBoolean:
1383 case CK_ReinterpretMemberPointer:
1384 case CK_AnyPointerToBlockPointerCast:
1385 case CK_ARCProduceObject:
1386 case CK_ARCConsumeObject:
1387 case CK_ARCReclaimReturnedObject:
1388 case CK_ARCExtendBlockObject:
1389 case CK_CopyAndAutoreleaseBlockObject:
1390 case CK_IntToOCLSampler:
1391 case CK_FloatingToFixedPoint:
1392 case CK_FixedPointToFloating:
1393 case CK_FixedPointCast:
1394 case CK_FixedPointToBoolean:
1395 case CK_FixedPointToIntegral:
1396 case CK_IntegralToFixedPoint:
1397 case CK_MatrixCast:
1398 case CK_HLSLVectorTruncation:
1399 case CK_HLSLMatrixTruncation:
1400 case CK_HLSLArrayRValue:
1401 case CK_HLSLElementwiseCast:
1402 case CK_HLSLAggregateSplatCast:
1403 llvm_unreachable("unexpected cast lvalue");
1404
1405 case CK_Dependent:
1406 llvm_unreachable("dependent cast kind in IR gen!");
1407
1408 case CK_BuiltinFnToFnPtr:
1409 llvm_unreachable("builtin functions are handled elsewhere");
1410
1411 case CK_Dynamic: {
1412 LValue lv = emitLValue(e->getSubExpr());
1413 Address v = lv.getAddress();
1414 const auto *dce = cast<CXXDynamicCastExpr>(e);
1416 }
1417
1418 // These are never l-values; just use the aggregate emission code.
1419 case CK_NonAtomicToAtomic:
1420 case CK_AtomicToNonAtomic:
1421 case CK_ToUnion:
1422 case CK_ObjCObjectLValueCast:
1423 case CK_VectorSplat:
1424 case CK_ConstructorConversion:
1425 case CK_UserDefinedConversion:
1426 case CK_CPointerToObjCPointerCast:
1427 case CK_BlockPointerToObjCPointerCast:
1428 case CK_LValueToRValue: {
1429 cgm.errorNYI(e->getSourceRange(),
1430 std::string("emitCastLValue for unhandled cast kind: ") +
1431 e->getCastKindName());
1432
1433 return {};
1434 }
1435 case CK_AddressSpaceConversion: {
1436 LValue lv = emitLValue(e->getSubExpr());
1437 QualType destTy = getContext().getPointerType(e->getType());
1438
1439 clang::LangAS srcLangAS = e->getSubExpr()->getType().getAddressSpace();
1440 cir::TargetAddressSpaceAttr srcAS;
1441 if (clang::isTargetAddressSpace(srcLangAS))
1442 srcAS = cir::toCIRTargetAddressSpace(getMLIRContext(), srcLangAS);
1443 else
1444 cgm.errorNYI(
1445 e->getSourceRange(),
1446 "emitCastLValue: address space conversion from unknown address "
1447 "space");
1448
1449 mlir::Value v = getTargetHooks().performAddrSpaceCast(
1450 *this, lv.getPointer(), srcAS, convertType(destTy));
1451
1453 lv.getAddress().getAlignment()),
1454 e->getType(), lv.getBaseInfo());
1455 }
1456
1457 case CK_LValueBitCast: {
1458 // This must be a reinterpret_cast (or c-style equivalent).
1459 const auto *ce = cast<ExplicitCastExpr>(e);
1460
1461 cgm.emitExplicitCastExprType(ce, this);
1462 LValue LV = emitLValue(e->getSubExpr());
1464 builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
1465
1466 return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
1467 }
1468
1469 case CK_NoOp: {
1470 // CK_NoOp can model a qualification conversion, which can remove an array
1471 // bound and change the IR type.
1472 LValue lv = emitLValue(e->getSubExpr());
1473 // Propagate the volatile qualifier to LValue, if exists in e.
1475 cgm.errorNYI(e->getSourceRange(),
1476 "emitCastLValue: NoOp changes volatile qual");
1477 if (lv.isSimple()) {
1478 Address v = lv.getAddress();
1479 if (v.isValid()) {
1480 mlir::Type ty = convertTypeForMem(e->getType());
1481 if (v.getElementType() != ty)
1482 cgm.errorNYI(e->getSourceRange(),
1483 "emitCastLValue: NoOp needs bitcast");
1484 }
1485 }
1486 return lv;
1487 }
1488
1489 case CK_UncheckedDerivedToBase:
1490 case CK_DerivedToBase: {
1491 auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();
1492
1493 LValue lv = emitLValue(e->getSubExpr());
1494 Address thisAddr = lv.getAddress();
1495
1496 // Perform the derived-to-base conversion
1497 Address baseAddr =
1498 getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
1499 /*NullCheckValue=*/false, e->getExprLoc());
1500
1501 // TODO: Support accesses to members of base classes in TBAA. For now, we
1502 // conservatively pretend that the complete object is of the base class
1503 // type.
1505 return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
1506 }
1507
1508 case CK_BaseToDerived: {
1509 const auto *derivedClassDecl = e->getType()->castAsCXXRecordDecl();
1510 LValue lv = emitLValue(e->getSubExpr());
1511
1512 // Perform the base-to-derived conversion
1514 getLoc(e->getSourceRange()), lv.getAddress(), derivedClassDecl,
1515 e->path(), /*NullCheckValue=*/false);
1516 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
1517 // performed and the object is not of the derived type.
1519
1521 return makeAddrLValue(derived, e->getType(), lv.getBaseInfo());
1522 }
1523
1524 case CK_ZeroToOCLOpaqueType:
1525 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
1526 }
1527
1528 llvm_unreachable("Invalid cast kind");
1529}
1530
1532 const MemberExpr *me) {
1533 if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
1534 // Try to emit static variable member expressions as DREs.
1535 return DeclRefExpr::Create(
1537 /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
1538 me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
1539 }
1540 return nullptr;
1541}
1542
1544 if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
1546 return emitDeclRefLValue(dre);
1547 }
1548
1549 Expr *baseExpr = e->getBase();
1550 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
1551 LValue baseLV;
1552 if (e->isArrow()) {
1553 LValueBaseInfo baseInfo;
1555 Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
1556 QualType ptrTy = baseExpr->getType()->getPointeeType();
1558 baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
1559 } else {
1561 baseLV = emitLValue(baseExpr);
1562 }
1563
1564 const NamedDecl *nd = e->getMemberDecl();
1565 if (auto *field = dyn_cast<FieldDecl>(nd)) {
1566 LValue lv = emitLValueForField(baseLV, field);
1568 if (getLangOpts().OpenMP) {
1569 // If the member was explicitly marked as nontemporal, mark it as
1570 // nontemporal. If the base lvalue is marked as nontemporal, mark access
1571 // to children as nontemporal too.
1572 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
1573 }
1574 return lv;
1575 }
1576
1577 if (isa<FunctionDecl>(nd)) {
1578 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: FunctionDecl");
1579 return LValue();
1580 }
1581
1582 llvm_unreachable("Unhandled member declaration!");
1583}
1584
1585/// Evaluate an expression into a given memory location.
1587 Qualifiers quals, bool isInit) {
1588 // FIXME: This function should take an LValue as an argument.
1589 switch (getEvaluationKind(e->getType())) {
1590 case cir::TEK_Complex: {
1591 LValue lv = makeAddrLValue(location, e->getType());
1592 emitComplexExprIntoLValue(e, lv, isInit);
1593 return;
1594 }
1595
1596 case cir::TEK_Aggregate: {
1597 emitAggExpr(e, AggValueSlot::forAddr(location, quals,
1601 return;
1602 }
1603
1604 case cir::TEK_Scalar: {
1606 LValue lv = makeAddrLValue(location, e->getType());
1607 emitStoreThroughLValue(rv, lv);
1608 return;
1609 }
1610 }
1611
1612 llvm_unreachable("bad evaluation kind");
1613}
1614
1616 const MaterializeTemporaryExpr *m,
1617 const Expr *inner) {
1618 // TODO(cir): cgf.getTargetHooks();
1619 switch (m->getStorageDuration()) {
1620 case SD_FullExpression:
1621 case SD_Automatic: {
1622 QualType ty = inner->getType();
1623
1625
1626 // The temporary memory should be created in the same scope as the extending
1627 // declaration of the temporary materialization expression.
1628 cir::AllocaOp extDeclAlloca;
1629 if (const ValueDecl *extDecl = m->getExtendingDecl()) {
1630 auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
1631 if (extDeclAddrIter != cgf.localDeclMap.end())
1632 extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
1633 }
1634 mlir::OpBuilder::InsertPoint ip;
1635 if (extDeclAlloca)
1636 ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
1637 return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
1638 cgf.getCounterRefTmpAsString(), /*alloca=*/nullptr,
1639 ip);
1640 }
1641 case SD_Thread:
1642 case SD_Static: {
1643 cgf.cgm.errorNYI(
1644 m->getSourceRange(),
1645 "createReferenceTemporary: static/thread storage duration");
1646 return Address::invalid();
1647 }
1648
1649 case SD_Dynamic:
1650 llvm_unreachable("temporary can't have dynamic storage duration");
1651 }
1652 llvm_unreachable("unknown storage duration");
1653}
1654
1656 const MaterializeTemporaryExpr *m,
1657 const Expr *e, Address referenceTemporary) {
1658 // Objective-C++ ARC:
1659 // If we are binding a reference to a temporary that has ownership, we
1660 // need to perform retain/release operations on the temporary.
1661 //
1662 // FIXME(ogcg): This should be looking at e, not m.
1663 if (m->getType().getObjCLifetime()) {
1664 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
1665 return;
1666 }
1667
1669 if (dk == QualType::DK_none)
1670 return;
1671
1672 switch (m->getStorageDuration()) {
1673 case SD_Static:
1674 case SD_Thread: {
1675 CXXDestructorDecl *referenceTemporaryDtor = nullptr;
1676 if (const auto *classDecl =
1678 classDecl && !classDecl->hasTrivialDestructor())
1679 // Get the destructor for the reference temporary.
1680 referenceTemporaryDtor = classDecl->getDestructor();
1681
1682 if (!referenceTemporaryDtor)
1683 return;
1684
1685 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
1686 "storage duration with destructors");
1687 break;
1688 }
1689
1690 case SD_FullExpression:
1691 cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
1693 break;
1694
1695 case SD_Automatic:
1696 cgf.cgm.errorNYI(e->getSourceRange(),
1697 "pushTemporaryCleanup: automatic storage duration");
1698 break;
1699
1700 case SD_Dynamic:
1701 llvm_unreachable("temporary cannot have dynamic storage duration");
1702 }
1703}
1704
1706 const MaterializeTemporaryExpr *m) {
1707 const Expr *e = m->getSubExpr();
1708
1709 assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
1710 !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
1711 "Reference should never be pseudo-strong!");
1712
1713 // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so
1714 // as that will cause the lifetime adjustment to be lost for ARC
1715 auto ownership = m->getType().getObjCLifetime();
1716 if (ownership != Qualifiers::OCL_None &&
1717 ownership != Qualifiers::OCL_ExplicitNone) {
1718 cgm.errorNYI(e->getSourceRange(),
1719 "emitMaterializeTemporaryExpr: ObjCLifetime");
1720 return {};
1721 }
1722
1725 e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);
1726
1727 for (const Expr *ignored : commaLHSs)
1728 emitIgnoredExpr(ignored);
1729
1730 if (isa<OpaqueValueExpr>(e)) {
1731 cgm.errorNYI(e->getSourceRange(),
1732 "emitMaterializeTemporaryExpr: OpaqueValueExpr");
1733 return {};
1734 }
1735
1736 // Create and initialize the reference temporary.
1737 Address object = createReferenceTemporary(*this, m, e);
1738
1739 if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
1740 // TODO(cir): add something akin to stripPointerCasts() to ptr above
1741 cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
1742 return {};
1743 } else {
1745 emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
1746 }
1747 pushTemporaryCleanup(*this, m, e, object);
1748
1749 // Perform derived-to-base casts and/or field accesses, to get from the
1750 // temporary object we created (and, potentially, for which we extended
1751 // the lifetime) to the subobject we're binding the reference to.
1752 if (!adjustments.empty()) {
1753 cgm.errorNYI(e->getSourceRange(),
1754 "emitMaterializeTemporaryExpr: Adjustments");
1755 return {};
1756 }
1757
1758 return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
1759}
1760
1761LValue
1764
1765 auto it = opaqueLValues.find(e);
1766 if (it != opaqueLValues.end())
1767 return it->second;
1768
1769 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
1770 return emitLValue(e->getSourceExpr());
1771}
1772
1773RValue
1776
1777 auto it = opaqueRValues.find(e);
1778 if (it != opaqueRValues.end())
1779 return it->second;
1780
1781 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
1782 return emitAnyExpr(e->getSourceExpr());
1783}
1784
1786 if (e->isFileScope()) {
1787 cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
1788 return {};
1789 }
1790
1791 if (e->getType()->isVariablyModifiedType())
1793
1794 Address declPtr = createMemTemp(e->getType(), getLoc(e->getSourceRange()),
1795 ".compoundliteral");
1796 const Expr *initExpr = e->getInitializer();
1797 LValue result = makeAddrLValue(declPtr, e->getType(), AlignmentSource::Decl);
1798
1799 emitAnyExprToMem(initExpr, declPtr, e->getType().getQualifiers(),
1800 /*Init*/ true);
1801
1802 // Block-scope compound literals are destroyed at the end of the enclosing
1803 // scope in C.
1804 if (!getLangOpts().CPlusPlus && e->getType().isDestructedType()) {
1805 cgm.errorNYI(e->getSourceRange(),
1806 "emitCompoundLiteralLValue: non C++ DestructedType");
1807 return {};
1808 }
1809
1810 return result;
1811}
1812
1814 RValue rv = emitCallExpr(e);
1815
1816 if (!rv.isScalar()) {
1817 cgm.errorNYI(e->getSourceRange(), "emitCallExprLValue: non-scalar return");
1818 return {};
1819 }
1820
1821 assert(e->getCallReturnType(getContext())->isReferenceType() &&
1822 "Can't have a scalar return unless the return type is a "
1823 "reference type!");
1824
1826}
1827
1829 // Comma expressions just emit their LHS then their RHS as an l-value.
1830 if (e->getOpcode() == BO_Comma) {
1831 emitIgnoredExpr(e->getLHS());
1832 return emitLValue(e->getRHS());
1833 }
1834
1835 if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI)
1837
1838 assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");
1839
1840 // Note that in all of these cases, __block variables need the RHS
1841 // evaluated first just in case the variable gets moved by the RHS.
1842
1844 case cir::TEK_Scalar: {
1846 if (e->getLHS()->getType().getObjCLifetime() !=
1848 cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
1849 return {};
1850 }
1851
1852 RValue rv = emitAnyExpr(e->getRHS());
1853 LValue lv = emitLValue(e->getLHS());
1854
1855 SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
1856 if (lv.isBitField())
1858 else
1859 emitStoreThroughLValue(rv, lv);
1860
1861 if (getLangOpts().OpenMP) {
1862 cgm.errorNYI(e->getSourceRange(), "openmp");
1863 return {};
1864 }
1865
1866 return lv;
1867 }
1868
1869 case cir::TEK_Complex: {
1871 }
1872
1873 case cir::TEK_Aggregate:
1874 cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
1875 return {};
1876 }
1877 llvm_unreachable("bad evaluation kind");
1878}
1879
1880/// Emit code to compute the specified expression which
1881/// can have any type. The result is returned as an RValue struct.
1883 bool ignoreResult) {
1885 case cir::TEK_Scalar:
1886 return RValue::get(emitScalarExpr(e, ignoreResult));
1887 case cir::TEK_Complex:
1889 case cir::TEK_Aggregate: {
1890 if (!ignoreResult && aggSlot.isIgnored())
1891 aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
1893 emitAggExpr(e, aggSlot);
1894 return aggSlot.asRValue();
1895 }
1896 }
1897 llvm_unreachable("bad evaluation kind");
1898}
1899
1900// Detect the unusual situation where an inline version is shadowed by a
1901// non-inline version. In that case we should pick the external one
1902// everywhere. That's GCC behavior too.
1904 for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
1905 if (!pd->isInlineBuiltinDeclaration())
1906 return false;
1907 return true;
1908}
1909
// Compute the CIRGenCallee for a direct call to the function denoted by
// \p gd. Builtin functions get special handling; everything else resolves
// to a plain direct callee via emitFunctionDeclPointer.
1910CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
1911 const auto *fd = cast<FunctionDecl>(gd.getDecl());
1912
1913 if (unsigned builtinID = fd->getBuiltinID()) {
1914 StringRef ident = cgm.getMangledName(gd);
1915 std::string fdInlineName = (ident + ".inline").str();
1916
1917 bool isPredefinedLibFunction =
1918 cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
// NOTE(review): attributes are not consulted yet, so the no-builtin
// attribute is assumed present unconditionally for now.
1919 // Assume nobuiltins everywhere until we actually read the attributes.
1920 bool hasAttributeNoBuiltin = true;
1922
1923 // When directly calling an inline builtin, call it through its mangled
1924 // name to make it clear it's not the actual builtin.
1925 auto fn = cast<cir::FuncOp>(curFn);
1926 if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
// Reuse the module's existing ".inline" clone when one was already
// created; otherwise forward-declare it with internal linkage, private
// visibility, and always-inline semantics.
1927 cir::FuncOp clone =
1928 mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
1929
1930 if (!clone) {
1931 // Create a forward declaration - the body will be generated in
1932 // generateCode when the function definition is processed.
1933 cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
1934 mlir::OpBuilder::InsertionGuard guard(builder);
1935 builder.setInsertionPointToStart(cgm.getModule().getBody());
1936
1937 clone = cir::FuncOp::create(builder, calleeFunc.getLoc(), fdInlineName,
1938 calleeFunc.getFunctionType());
1939 clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
1940 &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
1941 clone.setSymVisibility("private");
1942 clone.setInlineKind(cir::InlineKind::AlwaysInline);
1943 }
1944 return CIRGenCallee::forDirect(clone, gd);
1945 }
1946
1947 // Replaceable builtins provide their own implementation of a builtin. If we
1948 // are in an inline builtin implementation, avoid trivial infinite
1949 // recursion. Honor __attribute__((no_builtin("foo"))) or
1950 // __attribute__((no_builtin)) on the current function unless foo is
1951 // not a predefined library function which means we must generate the
1952 // builtin no matter what.
1953 else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
1954 return CIRGenCallee::forBuiltin(builtinID, fd);
1955 }
1956
// Non-builtin (or suppressed-builtin) case: resolve to a plain direct call.
1957 cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);
1958
1959 assert(!cir::MissingFeatures::hip());
1960
1961 return CIRGenCallee::forDirect(callee, gd);
1962}
1963
1965 if (ty->isVoidType())
1966 return RValue::get(nullptr);
1967
1968 cgm.errorNYI("unsupported type for undef rvalue");
1969 return RValue::get(nullptr);
1970}
1971
1973 const CIRGenCallee &origCallee,
1974 const clang::CallExpr *e,
1976 // Get the actual function type. The callee type will always be a pointer to
1977 // function type or a block pointer type.
1978 assert(calleeTy->isFunctionPointerType() &&
1979 "Callee must have function pointer type!");
1980
1981 calleeTy = getContext().getCanonicalType(calleeTy);
1982 auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
1983
1984 CIRGenCallee callee = origCallee;
1985
1986 if (getLangOpts().CPlusPlus)
1988
1989 const auto *fnType = cast<FunctionType>(pointeeTy);
1990
1992
1993 CallArgList args;
1995
1996 emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
1997 e->getDirectCallee());
1998
1999 const CIRGenFunctionInfo &funcInfo =
2000 cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
2001
2002 // C99 6.5.2.2p6:
2003 // If the expression that denotes the called function has a type that does
2004 // not include a prototype, [the default argument promotions are performed].
2005 // If the number of arguments does not equal the number of parameters, the
2006 // behavior is undefined. If the function is defined with a type that
2007 // includes a prototype, and either the prototype ends with an ellipsis (,
2008 // ...) or the types of the arguments after promotion are not compatible
2009 // with the types of the parameters, the behavior is undefined. If the
2010 // function is defined with a type that does not include a prototype, and
2011 // the types of the arguments after promotion are not compatible with those
2012 // of the parameters after promotion, the behavior is undefined [except in
2013 // some trivial cases].
2014 // That is, in the general case, we should assume that a call through an
2015 // unprototyped function type works like a *non-variadic* call. The way we
2016 // make this work is to cast to the exxact type fo the promoted arguments.
2017 if (isa<FunctionNoProtoType>(fnType)) {
2020 cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
2021 // get non-variadic function type
2022 calleeTy = cir::FuncType::get(calleeTy.getInputs(),
2023 calleeTy.getReturnType(), false);
2024 auto calleePtrTy = cir::PointerType::get(calleeTy);
2025
2026 mlir::Operation *fn = callee.getFunctionPointer();
2027 mlir::Value addr;
2028 if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
2029 addr = cir::GetGlobalOp::create(
2030 builder, getLoc(e->getSourceRange()),
2031 cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
2032 } else {
2033 addr = fn->getResult(0);
2034 }
2035
2036 fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
2037 callee.setFunctionPointer(fn);
2038 }
2039
2041 assert(!cir::MissingFeatures::hip());
2043
2044 cir::CIRCallOpInterface callOp;
2045 RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
2046 getLoc(e->getExprLoc()));
2047
2049
2050 return callResult;
2051}
2052
2054 e = e->IgnoreParens();
2055
2056 // Look through function-to-pointer decay.
2057 if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
2058 if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
2059 implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
2060 return emitCallee(implicitCast->getSubExpr());
2061 }
2062 // When performing an indirect call through a function pointer lvalue, the
2063 // function pointer lvalue is implicitly converted to an rvalue through an
2064 // lvalue-to-rvalue conversion.
2065 assert(implicitCast->getCastKind() == CK_LValueToRValue &&
2066 "unexpected implicit cast on function pointers");
2067 } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
2068 // Resolve direct calls.
2069 const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
2070 return emitDirectCallee(funcDecl);
2071 } else if (auto me = dyn_cast<MemberExpr>(e)) {
2072 if (const auto *fd = dyn_cast<FunctionDecl>(me->getMemberDecl())) {
2073 emitIgnoredExpr(me->getBase());
2074 return emitDirectCallee(fd);
2075 }
2076 // Else fall through to the indirect reference handling below.
2077 } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
2079 }
2080
2081 // Otherwise, we have an indirect reference.
2082 mlir::Value calleePtr;
2084 if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
2085 calleePtr = emitScalarExpr(e);
2086 functionType = ptrType->getPointeeType();
2087 } else {
2088 functionType = e->getType();
2089 calleePtr = emitLValue(e).getPointer();
2090 }
2091 assert(functionType->isFunctionType());
2092
2093 GlobalDecl gd;
2094 if (const auto *vd =
2095 dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
2096 gd = GlobalDecl(vd);
2097
2098 CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
2099 CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
2100 return callee;
2101}
2102
2106
2107 if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
2109
2110 if (isa<CUDAKernelCallExpr>(e)) {
2111 cgm.errorNYI(e->getSourceRange(), "call to CUDA kernel");
2112 return RValue::get(nullptr);
2113 }
2114
2115 if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
2116 // If the callee decl is a CXXMethodDecl, we need to emit this as a C++
2117 // operator member call.
2118 if (const CXXMethodDecl *md =
2119 dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
2120 return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
2121 // A CXXOperatorCallExpr is created even for explicit object methods, but
2122 // these should be treated like static function calls. Fall through to do
2123 // that.
2124 }
2125
2126 CIRGenCallee callee = emitCallee(e->getCallee());
2127
2128 if (callee.isBuiltin())
2129 return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
2130 returnValue);
2131
2132 if (callee.isPseudoDestructor())
2134
2135 return emitCall(e->getCallee()->getType(), callee, e, returnValue);
2136}
2137
2138/// Emit code to compute the specified expression, ignoring the result.
2140 if (e->isPRValue()) {
2141 emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
2142 return;
2143 }
2144
2145 // Just emit it as an l-value and drop the result.
2146 emitLValue(e);
2147}
2148
2150 LValueBaseInfo *baseInfo) {
2152 assert(e->getType()->isArrayType() &&
2153 "Array to pointer decay must have array source type!");
2154
2155 // Expressions of array type can't be bitfields or vector elements.
2156 LValue lv = emitLValue(e);
2157 Address addr = lv.getAddress();
2158
2159 // If the array type was an incomplete type, we need to make sure
2160 // the decay ends up being the right type.
2161 auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
2162
2163 if (e->getType()->isVariableArrayType())
2164 return addr;
2165
2166 [[maybe_unused]] auto pointeeTy =
2167 mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
2168
2169 [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
2170 assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
2171 assert(pointeeTy == arrayTy);
2172
2173 // The result of this decay conversion points to an array element within the
2174 // base lvalue. However, since TBAA currently does not support representing
2175 // accesses to elements of member arrays, we conservatively represent accesses
2176 // to the pointee object as if it had no any base lvalue specified.
2177 // TODO: Support TBAA for member arrays.
2180
2181 mlir::Value ptr = builder.maybeBuildArrayDecay(
2182 cgm.getLoc(e->getSourceRange()), addr.getPointer(),
2183 convertTypeForMem(eltType));
2184 return Address(ptr, addr.getAlignment());
2185}
2186
2187/// Given the address of a temporary variable, produce an r-value of its type.
2191 switch (getEvaluationKind(type)) {
2192 case cir::TEK_Complex:
2193 return RValue::getComplex(emitLoadOfComplex(lvalue, loc));
2194 case cir::TEK_Aggregate:
2195 return lvalue.asAggregateRValue();
2196 case cir::TEK_Scalar:
2197 return RValue::get(emitLoadOfScalar(lvalue, loc));
2198 }
2199 llvm_unreachable("bad evaluation kind");
2200}
2201
2202/// Emit an `if` on a boolean condition, filling `then` and `else` into
2203/// appropriated regions.
2204mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
2205 const Stmt *thenS,
2206 const Stmt *elseS) {
// Gather per-branch locations up front; elseLoc is only populated when an
// else-statement exists.
2207 mlir::Location thenLoc = getLoc(thenS->getSourceRange());
2208 std::optional<mlir::Location> elseLoc;
2209 if (elseS)
2210 elseLoc = getLoc(elseS->getSourceRange());
2211
// Each branch body is emitted inside its own LexicalScope bound to that
// arm's location and insertion block.
2212 mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
2214 cond, /*thenBuilder=*/
2215 [&](mlir::OpBuilder &, mlir::Location) {
2216 LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
2217 resThen = emitStmt(thenS, /*useCurrentScope=*/true);
2218 },
2219 thenLoc,
2220 /*elseBuilder=*/
2221 [&](mlir::OpBuilder &, mlir::Location) {
2222 assert(elseLoc && "Invalid location for elseS.");
2223 LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
2224 resElse = emitStmt(elseS, /*useCurrentScope=*/true);
2225 },
2226 elseLoc);
2227
// The overall emission succeeds only if both arms emitted successfully.
2228 return mlir::LogicalResult::success(resThen.succeeded() &&
2229 resElse.succeeded());
2230}
2231
2232/// Emit an `if` on a boolean condition, filling `then` and `else` into
2233/// appropriated regions.
2235 const clang::Expr *cond, BuilderCallbackRef thenBuilder,
2236 mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
2237 std::optional<mlir::Location> elseLoc) {
2238 // Attempt to be as accurate as possible with IfOp location, generate
2239 // one fused location that has either 2 or 4 total locations, depending
2240 // on else's availability.
2241 SmallVector<mlir::Location, 2> ifLocs{thenLoc};
2242 if (elseLoc)
2243 ifLocs.push_back(*elseLoc);
2244 mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);
2245
2246 // Emit the code with the fully general case.
2247 mlir::Value condV = emitOpOnBoolExpr(loc, cond);
2248 return cir::IfOp::create(builder, loc, condV, elseLoc.has_value(),
2249 /*thenBuilder=*/thenBuilder,
2250 /*elseBuilder=*/elseBuilder);
2251}
2252
/// Lower the boolean condition \p cond to a CIR value, special-casing
/// conditional operators so both arms are emitted in ternary regions.
2253/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas.
2254mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
2255 const Expr *cond) {
2258 cond = cond->IgnoreParens();
2259
2260 // In LLVM the condition is reversed here for efficient codegen.
2261 // This should be done in CIR prior to LLVM lowering; if we did it now,
2262 // we could make CIR-based diagnostics misleading.
2263 // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
2265
// A conditional operator in boolean position: emit it as a cir.ternary
// whose arms each yield their scalar result, then convert the selected
// value to bool.
2266 if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
2267 Expr *trueExpr = condOp->getTrueExpr();
2268 Expr *falseExpr = condOp->getFalseExpr();
2269 mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());
2270
2271 mlir::Value ternaryOpRes =
2272 cir::TernaryOp::create(
2273 builder, loc, condV, /*thenBuilder=*/
2274 [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
2275 mlir::Value lhs = emitScalarExpr(trueExpr);
2276 cir::YieldOp::create(b, loc, lhs);
2277 },
2278 /*elseBuilder=*/
2279 [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
2280 mlir::Value rhs = emitScalarExpr(falseExpr);
2281 cir::YieldOp::create(b, loc, rhs);
2282 })
2283 .getResult();
2284
2285 return emitScalarConversion(ternaryOpRes, condOp->getType(),
2286 getContext().BoolTy, condOp->getExprLoc());
2287 }
2288
// Throw-expressions in boolean position are not implemented yet; report and
// return a placeholder value so emission can continue.
2289 if (isa<CXXThrowExpr>(cond)) {
2290 cgm.errorNYI("NYI");
2291 return createDummyValue(loc, cond->getType());
2292 }
2293
2294 // If the branch has a condition wrapped by __builtin_unpredictable,
2295 // create metadata that specifies that the branch is unpredictable.
2296 // Don't bother if not optimizing because that metadata would not be used.
2298
2299 // Emit the code with the fully general case.
2300 return evaluateExprAsBool(cond);
2301}
2302
// Create an alloca named \p name of type \p ty at \p loc. The alloca is
// placed in the function entry block when \p insertIntoFnEntryBlock is set,
// otherwise in the current lexical scope's entry block (with a special case
// for cir.try, below).
2303mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2304 mlir::Location loc, CharUnits alignment,
2305 bool insertIntoFnEntryBlock,
2306 mlir::Value arraySize) {
2307 mlir::Block *entryBlock = insertIntoFnEntryBlock
2309 : curLexScope->getEntryBlock();
2310
2311 // If this is an alloca in the entry basic block of a cir.try and there's
2312 // a surrounding cir.scope, make sure the alloca ends up in the surrounding
2313 // scope instead. This is necessary in order to guarantee all SSA values are
2314 // reachable during cleanups.
2315 if (auto tryOp =
2316 llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
2317 if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
2318 entryBlock = &scopeOp.getScopeRegion().front();
2319 }
2320
// Delegate to the insertion-point-based overload, using the best alloca
// insertion point within the chosen block.
2321 return emitAlloca(name, ty, loc, alignment,
2322 builder.getBestAllocaInsertPoint(entryBlock), arraySize);
2323}
2324
// Variant of emitAlloca that materializes the cir.alloca at the explicit
// insertion point \p ip; the builder's previous insertion position is
// restored afterwards via the InsertionGuard.
2325mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2326 mlir::Location loc, CharUnits alignment,
2327 mlir::OpBuilder::InsertPoint ip,
2328 mlir::Value arraySize) {
2329 // CIR uses its own alloca address space rather than follow the target data
2330 // layout like original CodeGen. The data layout awareness should be done in
2331 // the lowering pass instead.
2332 cir::PointerType localVarPtrTy =
2334 mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);
2335
2336 mlir::Value addr;
2337 {
2338 mlir::OpBuilder::InsertionGuard guard(builder);
2339 builder.restoreInsertionPoint(ip);
2340 addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
2341 /*var type*/ ty, name, alignIntAttr, arraySize);
2343 }
2344 return addr;
2345}
2346
2347// Note: this function also emit constructor calls to support a MSVC extensions
2348// allowing explicit constructor function call.
2351 const Expr *callee = ce->getCallee()->IgnoreParens();
2352
2353 if (isa<BinaryOperator>(callee))
2355
2356 const auto *me = cast<MemberExpr>(callee);
2357 const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());
2358
2359 if (md->isStatic()) {
2360 cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
2361 return RValue::get(nullptr);
2362 }
2363
2364 bool hasQualifier = me->hasQualifier();
2365 NestedNameSpecifier qualifier = me->getQualifier();
2366 bool isArrow = me->isArrow();
2367 const Expr *base = me->getBase();
2368
2370 ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
2371}
2372
2374 // Emit the expression as an lvalue.
2375 LValue lv = emitLValue(e);
2376 assert(lv.isSimple());
2377 mlir::Value value = lv.getPointer();
2378
2380
2381 return RValue::get(value);
2382}
2383
2385 LValueBaseInfo *pointeeBaseInfo) {
2386 if (refLVal.isVolatile())
2387 cgm.errorNYI(loc, "load of volatile reference");
2388
2389 cir::LoadOp load =
2390 cir::LoadOp::create(builder, loc, refLVal.getAddress().getElementType(),
2391 refLVal.getAddress().getPointer());
2392
2394
2395 QualType pointeeType = refLVal.getType()->getPointeeType();
2396 CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
2397 return Address(load, convertTypeForMem(pointeeType), align);
2398}
2399
2401 mlir::Location loc,
2402 QualType refTy,
2403 AlignmentSource source) {
2404 LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
2405 LValueBaseInfo pointeeBaseInfo;
2407 Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
2408 return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
2409 pointeeBaseInfo);
2410}
2411
2412void CIRGenFunction::emitTrap(mlir::Location loc, bool createNewBlock) {
2413 cir::TrapOp::create(builder, loc);
2414 if (createNewBlock)
2415 builder.createBlock(builder.getBlock()->getParent());
2416}
2417
2419 bool createNewBlock) {
2421 cir::UnreachableOp::create(builder, getLoc(loc));
2422 if (createNewBlock)
2423 builder.createBlock(builder.getBlock()->getParent());
2424}
2425
2426mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
2427 clang::QualType qt) {
2428 mlir::Type t = convertType(qt);
2429 CharUnits alignment = getContext().getTypeAlignInChars(qt);
2430 return builder.createDummyValue(loc, t, alignment);
2431}
2432
2433//===----------------------------------------------------------------------===//
2434// CIR builder helpers
2435//===----------------------------------------------------------------------===//
2436
2438 const Twine &name, Address *alloca,
2439 mlir::OpBuilder::InsertPoint ip) {
2440 // FIXME: Should we prefer the preferred type alignment here?
2441 return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
2442 alloca, ip);
2443}
2444
2446 mlir::Location loc, const Twine &name,
2447 Address *alloca,
2448 mlir::OpBuilder::InsertPoint ip) {
2449 Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
2450 /*ArraySize=*/nullptr, alloca, ip);
2451 if (ty->isConstantMatrixType()) {
2453 cgm.errorNYI(loc, "temporary matrix value");
2454 }
2455 return result;
2456}
2457
2458/// This creates a alloca and inserts it into the entry block of the
2459/// current region.
2461 mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
2462 mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
2463 cir::AllocaOp alloca = ip.isSet()
2464 ? createTempAlloca(ty, loc, name, ip, arraySize)
2465 : createTempAlloca(ty, loc, name, arraySize);
2466 alloca.setAlignmentAttr(cgm.getSize(align));
2467 return Address(alloca, ty, align);
2468}
2469
2470/// This creates a alloca and inserts it into the entry block. The alloca is
2471/// casted to default address space if necessary.
2472// TODO(cir): Implement address space casting to match classic codegen's
2473// CreateTempAlloca behavior with DestLangAS parameter
2475 mlir::Location loc, const Twine &name,
2476 mlir::Value arraySize,
2477 Address *allocaAddr,
2478 mlir::OpBuilder::InsertPoint ip) {
2479 Address alloca =
2480 createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
2481 if (allocaAddr)
2482 *allocaAddr = alloca;
2483 mlir::Value v = alloca.getPointer();
2484 // Alloca always returns a pointer in alloca address space, which may
2485 // be different from the type defined by the language. For example,
2486 // in C++ the auto variables are in the default address space. Therefore
2487 // cast alloca to the default address space when necessary.
2488
2489 LangAS allocaAS = alloca.getAddressSpace()
2491 alloca.getAddressSpace().getValue().getUInt())
2496 getCIRAllocaAddressSpace().getValue().getUInt());
2497 }
2498
2499 if (dstTyAS != allocaAS) {
2501 builder.getPointerTo(ty, dstTyAS));
2502 }
2503 return Address(v, ty, align);
2504}
2505
2506/// This creates an alloca and inserts it into the entry block if \p ArraySize
2507/// is nullptr, otherwise inserts it at the current insertion point of the
2508/// builder.
2509cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2510 mlir::Location loc,
2511 const Twine &name,
2512 mlir::Value arraySize,
2513 bool insertIntoFnEntryBlock) {
2514 return mlir::cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
2515 insertIntoFnEntryBlock, arraySize)
2516 .getDefiningOp());
2517}
2518
2519/// This creates an alloca and inserts it into the provided insertion point
2520cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2521 mlir::Location loc,
2522 const Twine &name,
2523 mlir::OpBuilder::InsertPoint ip,
2524 mlir::Value arraySize) {
2525 assert(ip.isSet() && "Insertion point is not set");
2526 return mlir::cast<cir::AllocaOp>(
2527 emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
2528 .getDefiningOp());
2529}
2530
2531/// Try to emit a reference to the given value without producing it as
2532/// an l-value. For many cases, this is just an optimization, but it avoids
2533/// us needing to emit global copies of variables if they're named without
2534/// triggering a formal use in a context where we can't emit a direct
2535/// reference to them, for instance if a block or lambda or a member of a
2536/// local class uses a const int variable or constexpr variable from an
2537/// enclosing function.
2538///
2539/// For named members of enums, this is the only way they are emitted.
2542 const ValueDecl *value = refExpr->getDecl();
2543
2544 // There is a lot more to do here, but for now only EnumConstantDecl is
2545 // supported.
2547
2548 // The value needs to be an enum constant or a constant variable.
2549 if (!isa<EnumConstantDecl>(value))
2550 return ConstantEmission();
2551
2552 Expr::EvalResult result;
2553 if (!refExpr->EvaluateAsRValue(result, getContext()))
2554 return ConstantEmission();
2555
2556 QualType resultType = refExpr->getType();
2557
2558 // As long as we're only handling EnumConstantDecl, there should be no
2559 // side-effects.
2560 assert(!result.HasSideEffects);
2561
2562 // Emit as a constant.
2563 // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
2564 // somewhat heavy refactoring...)
2565 mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
2566 refExpr->getLocation(), result.Val, resultType);
2567 mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
2568 assert(cstToEmit && "expected a typed attribute");
2569
2571
2572 return ConstantEmission::forValue(cstToEmit);
2573}
2574
2578 return tryEmitAsConstant(dre);
2579 return ConstantEmission();
2580}
2581
2583 const CIRGenFunction::ConstantEmission &constant, Expr *e) {
2584 assert(constant && "not a constant");
2585 if (constant.isReference()) {
2586 cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
2587 return {};
2588 }
2589 return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
2590}
2591
2593 const StringLiteral *sl = e->getFunctionName();
2594 assert(sl != nullptr && "No StringLiteral name in PredefinedExpr");
2595 auto fn = cast<cir::FuncOp>(curFn);
2596 StringRef fnName = fn.getName();
2597 fnName.consume_front("\01");
2598 std::array<StringRef, 2> nameItems = {
2600 std::string gvName = llvm::join(nameItems, ".");
2601 if (isa_and_nonnull<BlockDecl>(curCodeDecl))
2602 cgm.errorNYI(e->getSourceRange(), "predefined lvalue in block");
2603
2604 return emitStringLiteralLValue(sl, gvName);
2605}
2606
2611
2612namespace {
2613// Handle the case where the condition is a constant evaluatable simple integer,
2614// which means we don't have to separately handle the true/false blocks.
// Returns the lvalue of the statically-selected arm when the condition
// constant-folds to a simple integer, or std::nullopt when the general
// two-region emission is required.
2615std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
2617 const Expr *condExpr = e->getCond();
2618 llvm::APSInt condExprVal;
2619 if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
2620 return std::nullopt;
2621
// 'live' is the arm the folded condition selects; 'dead' is the other one.
2622 const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
2623 if (!condExprVal.getBoolValue())
2624 std::swap(live, dead);
2625
// Bail out if the discarded arm contains a label — it may still be a jump
// target, so it cannot simply be dropped.
2626 if (cgf.containsLabel(dead))
2627 return std::nullopt;
2628
2629 // If the true case is live, we need to track its region.
2632 // If a throw expression we emit it and return an undefined lvalue
2633 // because it can't be used.
2634 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
2635 cgf.emitCXXThrowExpr(throwExpr);
2636 // Return an undefined lvalue - the throw terminates execution
2637 // so this value will never actually be used.
2638 mlir::Type elemTy = cgf.convertType(dead->getType());
2639 mlir::Value undefPtr =
2640 cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
2641 cgf.getLoc(throwExpr->getSourceRange()));
2642 return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
2643 dead->getType());
2644 }
2645 return cgf.emitLValue(live);
2646}
2647
2648/// Emit the operand of a glvalue conditional operator. This is either a glvalue
2649/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
2650/// LValue is returned and the current block has been terminated.
2651static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
2652 const Expr *operand) {
2653 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
2654 cgf.emitCXXThrowExpr(throwExpr);
2655 return std::nullopt;
2656 }
2657
2658 return cgf.emitLValue(operand);
2659}
2660} // namespace
2661
2662// Create and generate the 3 blocks for a conditional operator.
2663// Leaves the 'current block' in the continuation basic block.
2664template <typename FuncTy>
2667 const FuncTy &branchGenFunc) {
2668 ConditionalInfo info;
2669 ConditionalEvaluation eval(*this);
2670 mlir::Location loc = getLoc(e->getSourceRange());
2671 CIRGenBuilderTy &builder = getBuilder();
2672
2673 mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
2675 mlir::Type yieldTy{};
2676
2677 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
2678 const Expr *expr, std::optional<LValue> &resultLV) {
2679 CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
2680 curLexScope->setAsTernary();
2681
2683 eval.beginEvaluation();
2684 resultLV = branchGenFunc(*this, expr);
2685 mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
2686 eval.endEvaluation();
2687
2688 if (resultPtr) {
2689 yieldTy = resultPtr.getType();
2690 cir::YieldOp::create(b, loc, resultPtr);
2691 } else {
2692 // If LHS or RHS is a void expression we need
2693 // to patch arms as to properly match yield types.
2694 // If the current block's terminator is an UnreachableOp (from a throw),
2695 // we don't need a yield
2696 if (builder.getInsertionBlock()->mightHaveTerminator()) {
2697 mlir::Operation *terminator =
2698 builder.getInsertionBlock()->getTerminator();
2699 if (isa_and_nonnull<cir::UnreachableOp>(terminator))
2700 insertPoints.push_back(b.saveInsertionPoint());
2701 }
2702 }
2703 };
2704
2705 info.result = cir::TernaryOp::create(
2706 builder, loc, condV,
2707 /*trueBuilder=*/
2708 [&](mlir::OpBuilder &b, mlir::Location loc) {
2709 emitBranch(b, loc, e->getTrueExpr(), info.lhs);
2710 },
2711 /*falseBuilder=*/
2712 [&](mlir::OpBuilder &b, mlir::Location loc) {
2713 emitBranch(b, loc, e->getFalseExpr(), info.rhs);
2714 })
2715 .getResult();
2716
2717 // If both arms are void, so be it.
2718 if (!yieldTy)
2719 yieldTy = voidTy;
2720
2721 // Insert required yields.
2722 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2723 mlir::OpBuilder::InsertionGuard guard(builder);
2724 builder.restoreInsertionPoint(toInsert);
2725
2726 // Block does not return: build empty yield.
2727 if (!yieldTy) {
2728 cir::YieldOp::create(builder, loc);
2729 } else { // Block returns: set null yield value.
2730 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2731 cir::YieldOp::create(builder, loc, op0);
2732 }
2733 }
2734
2735 return info;
2736}
2737
2740 if (!expr->isGLValue()) {
2741 // ?: here should be an aggregate.
2742 assert(hasAggregateEvaluationKind(expr->getType()) &&
2743 "Unexpected conditional operator!");
2744 return emitAggExprToLValue(expr);
2745 }
2746
2747 OpaqueValueMapping binding(*this, expr);
2748 if (std::optional<LValue> res =
2749 handleConditionalOperatorLValueSimpleCase(*this, expr))
2750 return *res;
2751
2752 ConditionalInfo info =
2753 emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
2754 return emitLValueOrThrowExpression(cgf, e);
2755 });
2756
2757 if ((info.lhs && !info.lhs->isSimple()) ||
2758 (info.rhs && !info.rhs->isSimple())) {
2759 cgm.errorNYI(expr->getSourceRange(),
2760 "unsupported conditional operator with non-simple lvalue");
2761 return LValue();
2762 }
2763
2764 if (info.lhs && info.rhs) {
2765 Address lhsAddr = info.lhs->getAddress();
2766 Address rhsAddr = info.rhs->getAddress();
2767 Address result(info.result, lhsAddr.getElementType(),
2768 std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
2769 AlignmentSource alignSource =
2770 std::max(info.lhs->getBaseInfo().getAlignmentSource(),
2771 info.rhs->getBaseInfo().getAlignmentSource());
2773 return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
2774 }
2775
2776 assert((info.lhs || info.rhs) &&
2777 "both operands of glvalue conditional are throw-expressions?");
2778 return info.lhs ? *info.lhs : *info.rhs;
2779}
2780
2781/// An LValue is a candidate for having its loads and stores be made atomic if
2782/// we are operating under /volatile:ms *and* the LValue itself is volatile and
2783/// performing such an operation can be performed without a libcall.
2785 if (!cgm.getLangOpts().MSVolatile)
2786 return false;
2787
2788 cgm.errorNYI("LValueSuitableForInlineAtomic LangOpts MSVolatile");
2789 return false;
2790}
#define V(N, I)
Provides definitions for the various language-specific address spaces.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e, GlobalDecl gd)
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static cir::IntAttr getConstantIndexOrNull(mlir::Value idx)
static const Expr * getSimpleArrayDecayOperand(const Expr *e)
If the specified expr is a simple decay from an array to pointer, return the array subexpression.
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd)
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e, const VarDecl *vd)
static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf, mlir::Location beginLoc, mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd, mlir::Value thisValue)
static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd)
Defines the clang::Expr interface and subclasses for C++ expressions.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__device__ __2f16 b
__device__ __2f16 float c
static OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block)
cir::GetMemberOp createGetMember(mlir::Location loc, mlir::Type resultTy, mlir::Value base, llvm::StringRef name, unsigned index)
cir::PointerType getPointerTo(mlir::Type ty)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType BoolTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
CanQualType getCanonicalTagType(const TagDecl *TD) const
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4353
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4531
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4537
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4543
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2721
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2776
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2750
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:2764
SourceLocation getEndLoc() const
Definition Expr.h:2767
QualType getElementType() const
Definition TypeBase.h:3735
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4038
Expr * getLHS() const
Definition Expr.h:4088
Expr * getRHS() const
Definition Expr.h:4090
Opcode getOpcode() const
Definition Expr.h:4083
mlir::Value getPointer() const
Definition Address.h:95
mlir::Type getElementType() const
Definition Address.h:122
static Address invalid()
Definition Address.h:73
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
cir::TargetAddressSpaceAttr getAddressSpace() const
Definition Address.h:130
clang::CharUnits getAlignment() const
Definition Address.h:135
mlir::Type getType() const
Definition Address.h:114
bool isValid() const
Definition Address.h:74
mlir::Operation * getDefiningOp() const
Get the operation which defines this address.
Definition Address.h:138
An aggregate value slot.
IsDestructed_t
This is set to true if the slot might be aliased and it's not undefined behavior to access it through...
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
Address createElementBitCast(mlir::Location loc, Address addr, mlir::Type destType)
Cast the element type of the given address to a different type, preserving information like the align...
mlir::Value getArrayElement(mlir::Location arrayLocBegin, mlir::Location arrayLocEnd, mlir::Value arrayPtr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
Create a cir.ptr_stride operation to get access to an array element.
Abstract information about a function or function prototype.
Definition CIRGenCall.h:27
bool isPseudoDestructor() const
Definition CIRGenCall.h:123
void setFunctionPointer(mlir::Operation *functionPtr)
Definition CIRGenCall.h:185
const clang::FunctionDecl * getBuiltinDecl() const
Definition CIRGenCall.h:99
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CIRGenCall.h:127
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
unsigned getBuiltinID() const
Definition CIRGenCall.h:103
static CIRGenCallee forBuiltin(unsigned builtinID, const clang::FunctionDecl *builtinDecl)
Definition CIRGenCall.h:108
mlir::Operation * getFunctionPointer() const
Definition CIRGenCall.h:147
static CIRGenCallee forPseudoDestructor(const clang::CXXPseudoDestructorExpr *expr)
Definition CIRGenCall.h:117
An object to manage conditionally-evaluated expressions.
static ConstantEmission forValue(mlir::TypedAttr c)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural pointee alignment of T.
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitMemberExpr(const MemberExpr *e)
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
LValue emitLValueForLambdaField(const FieldDecl *field)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
const TargetCIRGenInfo & getTargetHooks() const
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
mlir::Operation * curFn
The current function or global initializer that is generated code for.
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Type convertTypeForMem(QualType t)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
static bool hasAggregateEvaluationKind(clang::QualType type)
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind op, bool isPre)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the reference and not the address of the value stored in the reference.
LValue emitCallExprLValue(const clang::CallExpr *e)
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
mlir::MLIRContext & getMLIRContext()
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
void emitCXXThrowExpr(const CXXThrowExpr *e)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /volatile:ms and the LValue itself is volatile.
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates a alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment, and cast it to the default address space.
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
CIRGenBuilderTy & getBuilder()
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
mlir::Value getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty={}, ForDefinition_t isForDefinition=NotForDefinition)
Return the mlir::Value for the address of the given global variable.
cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant)
This class handles record and union layout info while lowering AST types to CIR types.
cir::RecordType getCIRType() const
Return the "complete object" LLVM type associated with this record.
const CIRGenBitFieldInfo & getBitFieldInfo(const clang::FieldDecl *fd) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getCIRFieldNo(const clang::FieldDecl *fd) const
Return cir::RecordType element number that corresponds to the field FD.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
mlir::Attribute emitAbstract(const Expr *e, QualType destType)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
AlignmentSource getAlignmentSource() const
void mergeForCast(const LValueBaseInfo &info)
bool isExtVectorElt() const
const clang::Qualifiers & getQuals() const
mlir::Value getExtVectorPointer() const
static LValue makeExtVectorElt(Address vecAddress, mlir::ArrayAttr elts, clang::QualType type, LValueBaseInfo baseInfo)
mlir::Value getVectorIdx() const
bool isVectorElt() const
Address getAddress() const
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
mlir::ArrayAttr getExtVectorElts() const
static LValue makeVectorElt(Address vecAddress, mlir::Value index, clang::QualType t, LValueBaseInfo baseInfo)
RValue asAggregateRValue() const
unsigned getVRQualifiers() const
clang::QualType getType() const
static LValue makeBitfield(Address addr, const CIRGenBitFieldInfo &info, clang::QualType type, LValueBaseInfo baseInfo)
Create a new object to represent a bit-field access.
mlir::Value getPointer() const
bool isVolatileQualified() const
bool isBitField() const
Address getVectorAddress() const
clang::CharUnits getAlignment() const
LValueBaseInfo getBaseInfo() const
bool isVolatile() const
const CIRGenBitFieldInfo & getBitFieldInfo() const
Address getBitFieldAddress() const
Address getExtVectorAddress() const
bool isSimple() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:256
virtual mlir::Value performAddrSpaceCast(CIRGenFunction &cgf, mlir::Value v, cir::TargetAddressSpaceAttr srcAddr, mlir::Type destTy, bool isNonNull=false) const
Perform address space cast of an expression of pointer type.
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:179
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2129
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1366
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2943
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3126
Expr * getCallee()
Definition Expr.h:3090
arg_range arguments()
Definition Expr.h:3195
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1602
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3676
CastKind getCastKind() const
Definition Expr.h:3720
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3763
bool changesVolatileQualification() const
Return.
Definition Expr.h:3810
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1950
Expr * getSubExpr()
Definition Expr.h:3726
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3276
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3605
bool isFileScope() const
Definition Expr.h:3637
const Expr * getInitializer() const
Definition Expr.h:3633
ConditionalOperator - The ?
Definition Expr.h:4391
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1474
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:487
ValueDecl * getDecl()
Definition Expr.h:1338
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1468
SourceLocation getLocation() const
Definition Expr.h:1346
SourceLocation getLocation() const
Definition DeclBase.h:439
DeclContext * getDeclContext()
Definition DeclBase.h:448
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:83
bool isGLValue() const
Definition Expr.h:287
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:444
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3085
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1545
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6564
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4415
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4447
const Expr * getBase() const
Definition Expr.h:6581
Represents a member of a struct/union/class.
Definition Decl.h:3160
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3263
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4826
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
bool isZeroSize(const ASTContext &Ctx) const
Determine if this field is a subobject of zero size, that is, either a zero-length bit-field or a fie...
Definition Decl.cpp:4766
Represents a function declaration or definition.
Definition Decl.h:2000
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5269
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4920
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4945
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4970
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3364
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3447
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3588
Expr * getBase() const
Definition Expr.h:3441
bool isArrow() const
Definition Expr.h:3548
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3559
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3654
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1228
bool isUnique() const
Definition Expr.h:1236
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2005
StringRef getIdentKindName() const
Definition Expr.h:2062
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2040
StringLiteral * getFunctionName()
Definition Expr.h:2049
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8418
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8332
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1179
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
Represents a struct/union/class.
Definition Decl.h:4324
Encodes a location in the source.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1799
bool isUnion() const
Definition Decl.h:3925
Exposes information about the current target.
Definition TargetInfo.h:226
virtual StringRef getABI() const
Get the ABI currently in use.
bool isVoidType() const
Definition TypeBase.h:8891
bool isBooleanType() const
Definition TypeBase.h:9021
bool isPackedVectorBoolType(const ASTContext &ctx) const
Definition Type.cpp:419
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9187
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8628
bool isFunctionPointerType() const
Definition TypeBase.h:8596
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2338
bool isConstantMatrixType() const
Definition TypeBase.h:8696
bool isPointerType() const
Definition TypeBase.h:8529
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9178
bool isReferenceType() const
Definition TypeBase.h:8553
bool isVariableArrayType() const
Definition TypeBase.h:8640
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isExtVectorBoolType() const
Definition TypeBase.h:8676
bool isAnyComplexType() const
Definition TypeBase.h:8664
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9064
bool isAtomicType() const
Definition TypeBase.h:8717
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2801
bool isFunctionType() const
Definition TypeBase.h:8525
bool isVectorType() const
Definition TypeBase.h:8668
bool isSubscriptableVectorType() const
Definition TypeBase.h:8688
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9111
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2355
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2326
static bool isPrefix(Opcode Op)
isPrefix - Return true if this is a prefix operation, like --x.
Definition Expr.h:2319
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
TLSKind getTLSKind() const
Definition Decl.cpp:2179
bool hasInit() const
Definition Decl.cpp:2409
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2377
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1184
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:952
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3967
Represents a GCC generic vector type.
Definition TypeBase.h:4176
Defines the clang::TargetInfo interface.
OverflowBehavior
cir::TargetAddressSpaceAttr toCIRTargetAddressSpace(mlir::MLIRContext &context, clang::LangAS langAS)
Definition CIRTypes.cpp:937
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static AlignmentSource getFieldAlignmentSource(AlignmentSource source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
bool isTargetAddressSpace(LangAS AS)
@ SC_Register
Definition Specifiers.h:257
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:342
@ SD_Static
Static storage duration.
Definition Specifiers.h:343
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:340
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:341
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:344
LangAS
Defines the address space values used by the address space qualifier of QualType.
U cast(CodeGen::Address addr)
Definition Address.h:327
LangAS getLangASFromTargetAS(unsigned TargetAS)
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:180
static bool weakRefReference()
static bool objCLifetime()
static bool emitLifetimeMarkers()
static bool opLoadEmitScalarRangeCheck()
static bool addressSpace()
static bool opAllocaNonGC()
static bool opAllocaOpenMPThreadPrivate()
static bool preservedAccessIndexRegion()
static bool mergeAllConstants()
static bool opLoadStoreTbaa()
static bool cgFPOptionsRAII()
static bool opCallChain()
static bool opAllocaImpreciseLifetime()
static bool opAllocaStaticLocal()
static bool opAllocaTLS()
static bool emitCheckedInBoundsGEP()
static bool attributeNoBuiltin()
static bool setObjCGCLValueClass()
static bool cirgenABIInfo()
static bool opLoadStoreObjC()
static bool opCallArgEvaluationOrder()
static bool lambdaCaptures()
static bool insertBuiltinUnpredictable()
static bool opCallMustTail()
static bool shouldReverseUnaryCondOnBoolExpr()
static bool tryEmitAsConstant()
static bool addressIsKnownNonNull()
static bool astVarDeclInterface()
static bool cgCapturedStmtInfo()
static bool opAllocaEscapeByReference()
static bool opLoadStoreNontemporal()
static bool opCallFnInfoOpts()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Record with information about how a bitfield should be accessed.
unsigned volatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned volatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
cir::TargetAddressSpaceAttr getCIRAllocaAddressSpace() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:612