clang 23.0.0git
CIRGenExpr.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "Address.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h"
19#include "mlir/IR/BuiltinAttributes.h"
20#include "mlir/IR/Value.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/CharUnits.h"
23#include "clang/AST/Decl.h"
24#include "clang/AST/Expr.h"
25#include "clang/AST/ExprCXX.h"
32#include <optional>
33
34using namespace clang;
35using namespace clang::CIRGen;
36using namespace cir;
37
38/// Get the address of a zero-sized field within a record. The resulting address
39/// doesn't necessarily have the right type.
41 const FieldDecl *field,
42 llvm::StringRef fieldName,
43 unsigned fieldIndex) {
44 if (field->isZeroSize(getContext())) {
45 cgm.errorNYI(field->getSourceRange(),
46 "emitAddrOfFieldStorage: zero-sized field");
47 return Address::invalid();
48 }
49
50 mlir::Location loc = getLoc(field->getLocation());
51
52 mlir::Type fieldType = convertType(field->getType());
53 auto fieldPtr = cir::PointerType::get(fieldType);
54 // For most cases fieldName is the same as field->getName() but for lambdas,
55 // which do not currently carry the name, so it can be passed down from the
56 // CaptureStmt.
57 cir::GetMemberOp memberAddr = builder.createGetMember(
58 loc, fieldPtr, base.getPointer(), fieldName, fieldIndex);
59
60 // Retrieve layout information, compute alignment and return the final
61 // address.
62 const RecordDecl *rec = field->getParent();
63 const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
64 unsigned idx = layout.getCIRFieldNo(field);
66 layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
67 return Address(memberAddr, base.getAlignment().alignmentAtOffset(offset));
68}
69
/// Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
///
/// Walks through parens, casts, unary `&`, and `std::addressof`-style
/// builtins, propagating base/alignment info into \p baseInfo when provided.
// NOTE(review): the opening signature line (presumably
// `Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,`)
// was dropped by extraction; only the trailing parameter line remains.
                                                 LValueBaseInfo *baseInfo) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(expr->getType()->isPointerType() ||
         expr->getType()->isObjCObjectPointerType());
  expr = expr->IgnoreParens();

  // Casts:
  if (auto const *ce = dyn_cast<CastExpr>(expr)) {
    if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
      cgm.emitExplicitCastExprType(ece);

    switch (ce->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion: {
      if (const auto *ptrTy =
              ce->getSubExpr()->getType()->getAs<PointerType>()) {
        // void* carries no useful pointee alignment; fall back to the
        // type-based path at the bottom of the function.
        if (ptrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo innerBaseInfo;
        Address addr =
            emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
        if (baseInfo)
          *baseInfo = innerBaseInfo;

        if (isa<ExplicitCastExpr>(ce)) {
          LValueBaseInfo targetTypeBaseInfo;

          const QualType pointeeType = expr->getType()->getPointeeType();
          const CharUnits align =
              cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);

          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (baseInfo)
              baseInfo->mergeForCast(targetTypeBaseInfo);
            addr = Address(addr.getPointer(), addr.getElementType(), align);
          }
        }

        // Rewrite the element type so the address matches the cast target.
        const mlir::Type eltTy =
            convertTypeForMem(expr->getType()->getPointeeType());
        addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
                                                 addr, eltTy);

        return addr;
      }
      break;
    }

    // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
    case CK_ArrayToPointerDecay:
      return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);

    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
      const CXXRecordDecl *derived =
          ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      // NOTE(review): an argument line between `ce->path(),` and
      // `ce->getExprLoc());` was dropped by extraction — confirm against
      // upstream before relying on this call.
      return getAddressOfBaseClass(addr, derived, ce->path(),
                                   ce->getExprLoc());
    }

    case CK_AnyPointerToBlockPointerCast:
    case CK_BaseToDerived:
    case CK_BaseToDerivedMemberPointer:
    case CK_BlockPointerToObjCPointerCast:
    case CK_BuiltinFnToFnPtr:
    case CK_CPointerToObjCPointerCast:
    case CK_DerivedToBaseMemberPointer:
    case CK_Dynamic:
    case CK_FunctionToPointerDecay:
    case CK_IntegralToPointer:
    case CK_LValueToRValue:
    case CK_LValueToRValueBitCast:
    case CK_NullToMemberPointer:
    case CK_NullToPointer:
    case CK_ReinterpretMemberPointer:
      // Common pointer conversions, nothing to do here.
      // TODO: Is there any reason to treat base-to-derived conversions
      // specially?
      break;

    case CK_ARCConsumeObject:
    case CK_ARCExtendBlockObject:
    case CK_ARCProduceObject:
    case CK_ARCReclaimReturnedObject:
    case CK_AtomicToNonAtomic:
    case CK_BooleanToSignedIntegral:
    case CK_ConstructorConversion:
    case CK_CopyAndAutoreleaseBlockObject:
    case CK_Dependent:
    case CK_FixedPointCast:
    case CK_FixedPointToBoolean:
    case CK_FixedPointToFloating:
    case CK_FixedPointToIntegral:
    case CK_FloatingCast:
    case CK_FloatingComplexCast:
    case CK_FloatingComplexToBoolean:
    case CK_FloatingComplexToIntegralComplex:
    case CK_FloatingComplexToReal:
    case CK_FloatingRealToComplex:
    case CK_FloatingToBoolean:
    case CK_FloatingToFixedPoint:
    case CK_FloatingToIntegral:
    case CK_HLSLAggregateSplatCast:
    case CK_HLSLArrayRValue:
    case CK_HLSLElementwiseCast:
    case CK_HLSLVectorTruncation:
    case CK_HLSLMatrixTruncation:
    case CK_IntToOCLSampler:
    case CK_IntegralCast:
    case CK_IntegralComplexCast:
    case CK_IntegralComplexToBoolean:
    case CK_IntegralComplexToFloatingComplex:
    case CK_IntegralComplexToReal:
    case CK_IntegralRealToComplex:
    case CK_IntegralToBoolean:
    case CK_IntegralToFixedPoint:
    case CK_IntegralToFloating:
    case CK_LValueBitCast:
    case CK_MatrixCast:
    case CK_MemberPointerToBoolean:
    case CK_NonAtomicToAtomic:
    case CK_ObjCObjectLValueCast:
    case CK_PointerToBoolean:
    case CK_PointerToIntegral:
    case CK_ToUnion:
    case CK_ToVoid:
    case CK_UserDefinedConversion:
    case CK_VectorSplat:
    case CK_ZeroToOCLOpaqueType:
      // None of these casts can produce a pointer value here.
      llvm_unreachable("unexpected cast for emitPointerWithAlignment");
    }
  }

  // Unary &
  if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
    // TODO(cir): maybe we should use cir.unary for pointers here instead.
    if (uo->getOpcode() == UO_AddrOf) {
      LValue lv = emitLValue(uo->getSubExpr());
      if (baseInfo)
        *baseInfo = lv.getBaseInfo();
      return lv.getAddress();
    }
  }

  // std::addressof and variants.
  if (auto const *call = dyn_cast<CallExpr>(expr)) {
    switch (call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      cgm.errorNYI(expr->getSourceRange(),
                   "emitPointerWithAlignment: builtin addressof");
      return Address::invalid();
    }
    }
  }

  // Otherwise, use the alignment of the type.
  // NOTE(review): the opening of this return statement (presumably
  // `return makeNaturalAddressForPointer(`) was dropped by extraction.
      emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
      /*forPointeeType=*/true, baseInfo);
}
251
// Store the rvalue \p src through the destination lvalue \p dst, dispatching
// on the destination's lvalue kind (vector element, bit-field, or simple).
// NOTE(review): the opening signature line (presumably
// `void CIRGenFunction::emitStoreThroughLValue(RValue src, LValue dst,`)
// was dropped by extraction.
                                            bool isInit) {
  if (!dst.isSimple()) {
    if (dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element
      const mlir::Location loc = dst.getVectorPointer().getLoc();
      const mlir::Value vector =
          builder.createLoad(loc, dst.getVectorAddress());
      const mlir::Value newVector = cir::VecInsertOp::create(
          builder, loc, vector, src.getValue(), dst.getVectorIdx());
      builder.createStore(loc, newVector, dst.getVectorAddress());
      return;
    }

    assert(dst.isBitField() && "Unknown LValue type");
    // NOTE(review): the call performing the bit-field store (presumably
    // `emitStoreThroughBitfieldLValue(src, dst);`) appears to have been
    // dropped by extraction before this return — confirm upstream.
    return;

    cgm.errorNYI(dst.getPointer().getLoc(),
                 "emitStoreThroughLValue: non-simple lvalue");
    return;
  }

  assert(src.isScalar() && "Can't emit an aggregate store with this method");
  emitStoreOfScalar(src.getValue(), dst, isInit);
}
280
/// Build an lvalue for a reference to the global variable \p vd appearing in
/// expression \p e. Bitcasts the global's address to the in-memory type if
/// they differ, and drills through reference types.
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
                                      const VarDecl *vd) {
  QualType t = e->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "emitGlobalVarDeclLValue: thread_local variable");

  // Check if the variable is marked as declare target with link clause in
  // device codegen.
  if (cgf.getLangOpts().OpenMP)
    cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");

  // Traditional LLVM codegen handles thread local separately, CIR handles
  // as part of getAddrOfGlobalVar.
  mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);

  mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
  cir::PointerType realPtrTy = cgf.getBuilder().getPointerTo(realVarTy);
  if (realPtrTy != v.getType())
    v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);

  CharUnits alignment = cgf.getContext().getDeclAlign(vd);
  Address addr(v, realVarTy, alignment);
  LValue lv;
  if (vd->getType()->isReferenceType())
    // NOTE(review): the trailing arguments of this call (likely
    // `t, AlignmentSource::Decl);`) were dropped by extraction.
    lv = cgf.emitLoadOfReferenceLValue(addr, cgf.getLoc(e->getSourceRange()),
  else
    lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
  return lv;
}
316
/// Store the scalar \p value (of source type \p ty) through \p addr.
/// Routes atomic-qualified stores through the atomic path and, when storing a
/// variable initializer, tags the backing alloca as initialized.
void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
                                       bool isVolatile, QualType ty,
                                       LValueBaseInfo baseInfo, bool isInit,
                                       bool isNontemporal) {

  if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (clangVecTy->isExtVectorBoolType())
      cgm.errorNYI(addr.getPointer().getLoc(),
                   "emitStoreOfScalar ExtVectorBoolType");

    // Handle vectors of size 3 like size 4 for better performance.
    const mlir::Type elementType = addr.getElementType();
    const auto vecTy = cast<cir::VectorType>(elementType);

    // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it upstreamed
    if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
      cgm.errorNYI(addr.getPointer().getLoc(),
                   "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
  }

  // Convert from the value's primary IR type to its storage type.
  value = emitToMemory(value, ty);

  LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
  if (ty->isAtomicType() ||
      (!isInit && isLValueSuitableForInlineAtomic(atomicLValue))) {
    emitAtomicStore(RValue::get(value), atomicLValue, isInit);
    return;
  }

  // Update the alloca with more info on initialization.
  assert(addr.getPointer() && "expected pointer to exist");
  auto srcAlloca = addr.getDefiningOp<cir::AllocaOp>();
  if (currVarDecl && srcAlloca) {
    const VarDecl *vd = currVarDecl;
    assert(vd && "VarDecl expected");
    if (vd->hasInit())
      srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
  }

  assert(currSrcLoc && "must pass in source location");
  builder.createStore(*currSrcLoc, value, addr, isVolatile);

  if (isNontemporal) {
    cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
    return;
  }
}
369
370// TODO: Replace this with a proper TargetInfo function call.
371/// Helper method to check if the underlying ABI is AAPCS
372static bool isAAPCS(const TargetInfo &targetInfo) {
373 return targetInfo.getABI().starts_with("aapcs");
374}
375
377 LValue dst) {
378
379 const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
380 mlir::Type resLTy = convertTypeForMem(dst.getType());
381 Address ptr = dst.getBitFieldAddress();
382
383 bool useVoaltile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
384 dst.isVolatileQualified() &&
385 info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());
386
387 assert(currSrcLoc && "must pass in source location");
388
389 return builder.createSetBitfield(*currSrcLoc, resLTy, ptr,
390 ptr.getElementType(), src.getValue(), info,
391 dst.isVolatileQualified(), useVoaltile);
392}
393
395 const CIRGenBitFieldInfo &info = lv.getBitFieldInfo();
396
397 // Get the output type.
398 mlir::Type resLTy = convertType(lv.getType());
399 Address ptr = lv.getBitFieldAddress();
400
401 bool useVoaltile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
402 isAAPCS(cgm.getTarget());
403
404 mlir::Value field =
405 builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
406 info, lv.isVolatile(), useVoaltile);
408 return RValue::get(field);
409}
410
// Compute the address of the storage unit backing a bit-field member.
// NOTE(review): the opening signature line (presumably
// `Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,`) was
// dropped by extraction; only the remaining parameter lines are visible.
                                                 const FieldDecl *field,
                                                 mlir::Type fieldType,
                                                 unsigned index) {
  mlir::Location loc = getLoc(field->getLocation());
  cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
  // NOTE(review): the declaration of `rec` (used below; presumably the CIR
  // record type of the base address) was dropped by extraction — confirm
  // upstream.
  cir::GetMemberOp sea = getBuilder().createGetMember(
      loc, fieldPtr, base.getPointer(), field->getName(),
      rec.isUnion() ? field->getFieldIndex() : index);
  // NOTE(review): the opening of the `offset` declaration (presumably
  // `CharUnits offset = CharUnits::fromQuantity(`) was dropped by extraction.
      rec.getElementOffset(cgm.getDataLayout().layout, index));
  return Address(sea, base.getAlignment().alignmentAtOffset(offset));
}
425
// Build a bit-field lvalue for \p field within the record lvalue \p base.
// NOTE(review): the opening signature line (presumably
// `LValue CIRGenFunction::emitLValueForBitField(LValue base,`) was dropped
// by extraction.
                                             const FieldDecl *field) {
  LValueBaseInfo baseInfo = base.getBaseInfo();
  const CIRGenRecordLayout &layout =
      cgm.getTypes().getCIRGenRecordLayout(field->getParent());
  const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);

  unsigned idx = layout.getCIRFieldNo(field);
  Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);

  mlir::Location loc = getLoc(field->getLocation());
  // Make the address's element type match the bit-field's storage type.
  if (addr.getElementType() != info.storageType)
    addr = builder.createElementBitCast(loc, addr, info.storageType);

  // NOTE(review): the initializer of `fieldType` was dropped by extraction
  // (likely the field's type combined with the base's CVR qualifiers).
  QualType fieldType =
  // TODO(cir): Support TBAA for bit fields.
  LValueBaseInfo fieldBaseInfo(baseInfo.getAlignmentSource());
  return LValue::makeBitfield(addr, info, fieldType, fieldBaseInfo);
}
449
// Build an lvalue for the (possibly bit-field, possibly reference) member
// \p field of the record lvalue \p base.
// NOTE(review): the opening signature line (presumably
// `LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl
// *field) {`) was dropped by extraction.
  LValueBaseInfo baseInfo = base.getBaseInfo();

  if (field->isBitField())
    return emitLValueForBitField(base, field);

  QualType fieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
  LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));

  Address addr = base.getAddress();
  if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
    if (cgm.getCodeGenOpts().StrictVTablePointers &&
        classDecl->isDynamicClass()) {
      cgm.errorNYI(field->getSourceRange(),
                   "emitLValueForField: strict vtable for dynamic class");
    }
  }

  unsigned recordCVR = base.getVRQualifiers();

  llvm::StringRef fieldName = field->getName();
  unsigned fieldIndex;
  // Lambda capture fields carry no name; use the one recorded at capture time.
  if (cgm.lambdaFieldToName.count(field))
    fieldName = cgm.lambdaFieldToName[field];

  if (rec->isUnion())
    fieldIndex = field->getFieldIndex();
  else {
    const CIRGenRecordLayout &layout =
        cgm.getTypes().getCIRGenRecordLayout(field->getParent());
    fieldIndex = layout.getCIRFieldNo(field);
  }

  addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);

  // If this is a reference field, load the reference right now.
  if (fieldType->isReferenceType()) {
    LValue refLVal = makeAddrLValue(addr, fieldType, fieldBaseInfo);
    if (recordCVR & Qualifiers::Volatile)
      refLVal.getQuals().addVolatile();
    addr = emitLoadOfReference(refLVal, getLoc(field->getSourceRange()),
                               &fieldBaseInfo);

    // Qualifiers on the struct don't apply to the referencee.
    recordCVR = 0;
    fieldType = fieldType->getPointeeType();
  }

  if (field->hasAttr<AnnotateAttr>()) {
    cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
    return LValue();
  }

  LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
  lv.getQuals().addCVRQualifiers(recordCVR);

  // __weak attribute on a field is ignored.
  // NOTE(review): the `if` condition guarding this branch (likely an ObjC
  // __weak qualifier check) was dropped by extraction.
    cgm.errorNYI(field->getSourceRange(),
                 "emitLValueForField: __weak attribute");
    return LValue();
  }

  return lv;
}
520
// Build an lvalue suitable for *initializing* the member \p field; reference
// members get the address of the reference slot itself (not the referencee).
// NOTE(review): the first signature line (presumably
// `LValue CIRGenFunction::emitLValueForFieldInitialization(`) was dropped
// by extraction.
    LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName) {
  QualType fieldType = field->getType();

  // Non-reference members take the ordinary field-lvalue path.
  if (!fieldType->isReferenceType())
    return emitLValueForField(base, field);

  const CIRGenRecordLayout &layout =
      cgm.getTypes().getCIRGenRecordLayout(field->getParent());
  unsigned fieldIndex = layout.getCIRFieldNo(field);

  Address v =
      emitAddrOfFieldStorage(base.getAddress(), field, fieldName, fieldIndex);

  // Make sure that the address is pointing to the right type.
  mlir::Type memTy = convertTypeForMem(fieldType);
  v = builder.createElementBitCast(getLoc(field->getSourceRange()), v, memTy);

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type. This
  // should be similar to what we do in EmitLValueForField().
  LValueBaseInfo baseInfo = base.getBaseInfo();
  AlignmentSource fieldAlignSource = baseInfo.getAlignmentSource();
  LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(fieldAlignSource));
  return makeAddrLValue(v, fieldType, fieldBaseInfo);
}
548
549/// Converts a scalar value from its primary IR type (as returned
550/// by ConvertType) to its load/store type.
551mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
552 if (auto *atomicTy = ty->getAs<AtomicType>())
553 ty = atomicTy->getValueType();
554
555 if (ty->isExtVectorBoolType()) {
556 cgm.errorNYI("emitToMemory: extVectorBoolType");
557 }
558
559 // Unlike in classic codegen CIR, bools are kept as `cir.bool` and BitInts are
560 // kept as `cir.int<N>` until further lowering
561
562 return value;
563}
564
/// Converts a scalar value from its load/store type back to its primary IR
/// type; currently a no-op in CIR apart from unwrapping atomics.
mlir::Value CIRGenFunction::emitFromMemory(mlir::Value value, QualType ty) {
  if (auto *atomicTy = ty->getAs<AtomicType>())
    ty = atomicTy->getValueType();

  // NOTE(review): the `if` condition guarding this NYI (likely a packed
  // boolean-vector type check) was dropped by extraction.
    cgm.errorNYI("emitFromMemory: PackedVectorBoolType");
  }

  return value;
}
575
576void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
577 bool isInit) {
578 if (lvalue.getType()->isConstantMatrixType()) {
579 assert(0 && "NYI: emitStoreOfScalar constant matrix type");
580 return;
581 }
582
583 emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
584 lvalue.getType(), lvalue.getBaseInfo(), isInit,
585 /*isNontemporal=*/false);
586}
587
/// Load a scalar of type \p ty from \p addr, honoring volatility and the
/// alignment/provenance recorded in \p baseInfo.
mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
                                             QualType ty, SourceLocation loc,
                                             LValueBaseInfo baseInfo) {
  // Traditional LLVM codegen handles thread local separately, CIR handles
  // as part of getAddrOfGlobalVar (GetGlobalOp).
  mlir::Type eltTy = addr.getElementType();

  if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
    if (clangVecTy->isExtVectorBoolType()) {
      cgm.errorNYI(loc, "emitLoadOfScalar: ExtVectorBoolType");
      return nullptr;
    }

    const auto vecTy = cast<cir::VectorType>(eltTy);

    // Handle vectors of size 3 like size 4 for better performance.
    if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
      cgm.errorNYI(addr.getPointer().getLoc(),
                   "emitLoadOfScalar Vec3 & PreserveVec3Type disabled");
  }

  // Atomic-qualified loads are not implemented yet; flag them.
  LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
  if (ty->isAtomicType() || isLValueSuitableForInlineAtomic(atomicLValue))
    cgm.errorNYI("emitLoadOfScalar: load atomic");

  if (mlir::isa<cir::VoidType>(eltTy))
    cgm.errorNYI(loc, "emitLoadOfScalar: void type");

  mlir::Value loadOp = builder.createLoad(getLoc(loc), addr, isVolatile);
  if (!ty->isBooleanType() && ty->hasBooleanRepresentation())
    cgm.errorNYI("emitLoadOfScalar: boolean type with boolean representation");

  return loadOp;
}
626
// Convenience overload: load a scalar through an LValue by forwarding its
// address, volatility, type, and base info to the Address overload.
// NOTE(review): the opening signature line (presumably
// `mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue,`) and some
// preceding lines were dropped by extraction.
                                             SourceLocation loc) {
  return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getType(), loc, lvalue.getBaseInfo());
}
634
635/// Given an expression that represents a value lvalue, this
636/// method emits the address of the lvalue, then loads the result as an rvalue,
637/// returning the rvalue.
639 assert(!lv.getType()->isFunctionType());
640 assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");
641
642 if (lv.isBitField())
643 return emitLoadOfBitfieldLValue(lv, loc);
644
645 if (lv.isSimple())
646 return RValue::get(emitLoadOfScalar(lv, loc));
647
648 if (lv.isVectorElt()) {
649 const mlir::Value load =
650 builder.createLoad(getLoc(loc), lv.getVectorAddress());
651 return RValue::get(cir::VecExtractOp::create(builder, getLoc(loc), load,
652 lv.getVectorIdx()));
653 }
654
655 if (lv.isExtVectorElt())
657
658 cgm.errorNYI(loc, "emitLoadOfLValue");
659 return RValue::get(nullptr);
660}
661
662int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx,
663 const mlir::ArrayAttr elts) {
664 auto elt = mlir::cast<mlir::IntegerAttr>(elts[idx]);
665 return elt.getInt();
666}
667
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
// NOTE(review): the opening signature line (presumably
// `RValue CIRGenFunction::emitLoadOfExtVectorElementLValue(LValue lv) {`)
// was dropped by extraction.
  mlir::Location loc = lv.getExtVectorPointer().getLoc();
  mlir::Value vec = builder.createLoad(loc, lv.getExtVectorAddress());

  // HLSL allows treating scalars as one-element vectors. Converting the scalar
  // IR value to a vector here allows the rest of codegen to behave as normal.
  if (getLangOpts().HLSL && !mlir::isa<cir::VectorType>(vec.getType())) {
    cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: HLSL");
    return {};
  }

  const mlir::ArrayAttr elts = lv.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element. Just codegen as an extractelement.
  const auto *exprVecTy = lv.getType()->getAs<clang::VectorType>();
  if (!exprVecTy) {
    int64_t indexValue = getAccessedFieldNo(0, elts);
    cir::ConstantOp index =
        builder.getConstInt(loc, builder.getSInt64Ty(), indexValue);
    return RValue::get(cir::VecExtractOp::create(builder, loc, vec, index));
  }

  // Always use shuffle vector to try to retain the original program structure
  // NOTE(review): the declaration of `mask` (likely an
  // `llvm::SmallVector<int64_t>` of shuffle indices) was dropped by
  // extraction.
  for (auto i : llvm::seq<unsigned>(0, exprVecTy->getNumElements()))
    mask.push_back(getAccessedFieldNo(i, elts));

  cir::VecShuffleOp resultVec = builder.createVecShuffle(loc, vec, mask);
  if (lv.getType()->isExtVectorBoolType()) {
    cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: ExtVectorBoolType");
    return {};
  }

  return RValue::get(resultVec);
}
706
// Build an lvalue for a `.* `/`->*` pointer-to-data-member expression.
LValue
// NOTE(review): the rest of the signature (presumably
// `CIRGenFunction::emitPointerToDataMemberBinaryExpr(const BinaryOperator
// *e) {`) was dropped by extraction.
  assert((e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) &&
         "unexpected binary operator opcode");

  Address baseAddr = Address::invalid();
  // `.*` takes an lvalue base; `->*` takes a pointer base.
  if (e->getOpcode() == BO_PtrMemD)
    baseAddr = emitLValue(e->getLHS()).getAddress();
  else
    baseAddr = emitPointerWithAlignment(e->getLHS());

  const auto *memberPtrTy = e->getRHS()->getType()->castAs<MemberPointerType>();

  mlir::Value memberPtr = emitScalarExpr(e->getRHS());

  LValueBaseInfo baseInfo;
  Address memberAddr = emitCXXMemberDataPointerAddress(e, baseAddr, memberPtr,
                                                       memberPtrTy, &baseInfo);

  return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo);
}
729
/// Generates lvalue for partial ext_vector access.
// NOTE(review): the opening signature line (an Address-returning member
// function taking the ext-vector LValue) was dropped by extraction; only the
// trailing `mlir::Location loc` parameter line remains.
                                                  mlir::Location loc) {
  Address vectorAddress = lv.getExtVectorAddress();
  QualType elementTy = lv.getType()->castAs<VectorType>()->getElementType();
  mlir::Type vectorElementTy = cgm.getTypes().convertType(elementTy);
  // Reinterpret the vector address as a pointer to its element type so we can
  // index to the first accessed element.
  Address castToPointerElement =
      vectorAddress.withElementType(builder, vectorElementTy);

  mlir::ArrayAttr extVecElts = lv.getExtVectorElts();
  unsigned idx = getAccessedFieldNo(0, extVecElts);
  mlir::Value idxValue =
      builder.getConstInt(loc, mlir::cast<cir::IntType>(ptrDiffTy), idx);

  mlir::Value elementValue = builder.getArrayElement(
      loc, loc, castToPointerElement.getPointer(), vectorElementTy, idxValue,
      /*shouldDecay=*/false);

  // Alignment of element idx is the base alignment offset by idx elements.
  const CharUnits eltSize = getContext().getTypeSizeInChars(elementTy);
  const CharUnits alignment =
      castToPointerElement.getAlignment().alignmentAtOffset(idx * eltSize);
  return Address(elementValue, vectorElementTy, alignment);
}
753
/// Return the CIR function for the function declaration \p gd.
static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
  // NOTE(review): an intervening line (likely a MissingFeatures assertion)
  // was dropped by extraction here.
  return cgm.getAddrOfFunction(gd);
}
758
// Build an lvalue for a lambda-captured field given the lambda's `this`.
// NOTE(review): the opening signature line (presumably
// `static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const
// FieldDecl *fd,`) was dropped by extraction.
                                       mlir::Value thisValue) {
  return cgf.emitLValueForLambdaField(fd, thisValue);
}
763
/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
///
// NOTE(review): the opening signature line (presumably
// `LValue CIRGenFunction::emitLValueForLambdaField(const FieldDecl *field,`)
// was dropped by extraction.
                                                mlir::Value thisValue) {
  bool hasExplicitObjectParameter = false;
  const auto *methD = dyn_cast_if_present<CXXMethodDecl>(curCodeDecl);
  LValue lambdaLV;
  if (methD) {
    hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction();
    assert(methD->getParent()->isLambda());
    assert(methD->getParent() == field->getParent());
  }
  if (hasExplicitObjectParameter) {
    cgm.errorNYI(field->getSourceRange(), "ExplicitObjectMemberFunction");
  } else {
    // NOTE(review): the initializer of `lambdaTagType` (likely
    // `getContext().getRecordType(field->getParent());`) was dropped by
    // extraction.
    QualType lambdaTagType =
    lambdaLV = makeNaturalAlignAddrLValue(thisValue, lambdaTagType);
  }
  return emitLValueForField(lambdaLV, field);
}
786
790
/// Build an lvalue referring to the function declared by \p gd, bitcasting
/// the emitted function's address when its CIR type differs from the
/// declared type.
static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
                                     GlobalDecl gd) {
  const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
  cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
  mlir::Location loc = cgf.getLoc(e->getSourceRange());
  CharUnits align = cgf.getContext().getDeclAlign(fd);

  mlir::Type fnTy = funcOp.getFunctionType();
  mlir::Type ptrTy = cir::PointerType::get(fnTy);
  mlir::Value addr = cir::GetGlobalOp::create(cgf.getBuilder(), loc, ptrTy,
                                              funcOp.getSymName());

  // If the emitted function's type differs from the declared type, bitcast
  // the address to the declared type's pointer.
  if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
    fnTy = cgf.convertType(fd->getType());
    ptrTy = cir::PointerType::get(fnTy);

    addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
                               cir::CastKind::bitcast, addr);
  }

  // NOTE(review): the trailing argument of this call (likely
  // `AlignmentSource::Decl);`) was dropped by extraction.
  return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
}
816
/// Determine whether we can emit a reference to \p vd from the current
/// context, despite not necessarily having seen an odr-use of the variable in
/// this context.
/// TODO(cir): This could be shared with classic codegen.
// NOTE(review): the opening signature line (presumably
// `static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf,`)
// was dropped by extraction.
                                            const DeclRefExpr *e,
                                            const VarDecl *vd) {
  // For a variable declared in an enclosing scope, do not emit a spurious
  // reference even if we have a capture, as that will emit an unwarranted
  // reference to our capture state, and will likely generate worse code than
  // emitting a local copy.
  // NOTE(review): the `if` condition for this early return was dropped by
  // extraction.
    return false;

  // For a local declaration declared in this function, we can always reference
  // it even if we don't have an odr-use.
  if (vd->hasLocalStorage()) {
    return vd->getDeclContext() ==
           dyn_cast_or_null<DeclContext>(cgf.curCodeDecl);
  }

  // For a global declaration, we can emit a reference to it if we know
  // for sure that we are able to emit a definition of it.
  vd = vd->getDefinition(cgf.getContext());
  if (!vd)
    return false;

  // Don't emit a spurious reference if it might be to a variable that only
  // exists on a different device / target.
  // FIXME: This is unnecessarily broad. Check whether this would actually be a
  // cross-target reference.
  if (cgf.getLangOpts().OpenMP || cgf.getLangOpts().CUDA ||
      cgf.getLangOpts().OpenCL) {
    return false;
  }

  // We can emit a spurious reference only if the linkage implies that we'll
  // be emitting a non-interposable symbol that will be retained until link
  // time.
  switch (cgf.cgm.getCIRLinkageVarDefinition(vd, /*IsConstant=*/false)) {
  case cir::GlobalLinkageKind::ExternalLinkage:
  case cir::GlobalLinkageKind::LinkOnceODRLinkage:
  case cir::GlobalLinkageKind::WeakODRLinkage:
  case cir::GlobalLinkageKind::InternalLinkage:
  case cir::GlobalLinkageKind::PrivateLinkage:
    return true;
  default:
    return false;
  }
}
867
// Build an lvalue for a DeclRefExpr: handles constant non-odr uses, lambda
// captures, global/static/local variables, bindings, and function references.
// NOTE(review): the opening signature line (presumably
// `LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {`) was
// dropped by extraction.
  const NamedDecl *nd = e->getDecl();
  QualType ty = e->getType();

  assert(e->isNonOdrUse() != NOUR_Unevaluated &&
         "should not emit an unevaluated operand");

  if (const auto *vd = dyn_cast<VarDecl>(nd)) {
    // Global Named registers access via intrinsics only
    if (vd->getStorageClass() == SC_Register && vd->hasAttr<AsmLabelAttr>() &&
        !vd->isLocalVarDecl()) {
      cgm.errorNYI(e->getSourceRange(),
                   "emitDeclRefLValue: Global Named registers access");
      return LValue();
    }

    // A constant non-odr use: materialize the evaluated constant instead of
    // referencing the variable itself.
    if (e->isNonOdrUse() == NOUR_Constant &&
        (vd->getType()->isReferenceType() ||
         !canEmitSpuriousReferenceToVariable(*this, e, vd))) {
      vd->getAnyInitializer(vd);
      mlir::Attribute val = ConstantEmitter(*this).emitAbstract(
          e->getLocation(), *vd->evaluateValue(), vd->getType());
      assert(val && "failed to emit constant expression");

      Address addr = Address::invalid();
      if (!vd->getType()->isReferenceType()) {
        // Spill the constant value to a global.
        addr = cgm.createUnnamedGlobalFrom(*vd, val,
                                           getContext().getDeclAlign(vd));
        mlir::Type varTy = getTypes().convertTypeForMem(vd->getType());
        auto ptrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
        if (ptrTy.getPointee() != varTy) {
          addr = addr.withElementType(builder, varTy);
        }
      } else {
        cgm.errorNYI(e->getSourceRange(),
                     "emitDeclRefLValue: non-odr reference type");
      }
      return makeAddrLValue(addr, ty, AlignmentSource::Decl);
    }

    // Check for captured variables.
    // NOTE(review): the `if` condition opening this capture block (likely
    // `e->refersToEnclosingVariableOrCapture()`) was dropped by extraction.
      vd = vd->getCanonicalDecl();
      if (FieldDecl *fd = lambdaCaptureFields.lookup(vd))
        return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
      // NOTE(review): one or two lines were dropped by extraction before
      // this closing brace.
    }
  }

  if (const auto *vd = dyn_cast<VarDecl>(nd)) {
    // Checks for omitted feature handling
    // NOTE(review): several lines (likely MissingFeatures assertions) were
    // dropped by extraction here.

    // Check if this is a global variable
    if (vd->hasLinkage() || vd->isStaticDataMember())
      return emitGlobalVarDeclLValue(*this, e, vd);

    Address addr = Address::invalid();

    // The variable should generally be present in the local decl map.
    auto iter = localDeclMap.find(vd);
    if (iter != localDeclMap.end()) {
      addr = iter->second;
    } else {
      // Otherwise, it might be static local we haven't emitted yet for some
      // reason; most likely, because it's in an outer function.
      cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: static local");
    }

    // Drill into reference types.
    // NOTE(review): the branches of this conditional initializer were
    // dropped by extraction.
    LValue lv =
        vd->getType()->isReferenceType()

    // Statics are defined as globals, so they are not include in the function's
    // symbol table.
    assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
           "non-static locals should be already mapped");

    return lv;
  }

  if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
    // NOTE(review): the condition guarding this lambda-capture NYI was
    // dropped by extraction.
      cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
      return LValue();
    }
    return emitLValue(bd->getBinding());
  }

  if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
    LValue lv = emitFunctionDeclLValue(*this, e, fd);

    // Emit debuginfo for the function declaration if the target wants to.
    if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
    // NOTE(review): the body of this `if` was dropped by extraction.

    return lv;
  }

  cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
  return LValue();
}
981
// Evaluate an expression for its boolean value. Member-pointer operands are
// not yet implemented (a dummy bool value is returned after reporting NYI);
// non-complex operands go through scalar emission followed by a
// scalar->bool conversion; complex operands take the complex path.
983 QualType boolTy = getContext().BoolTy;
984 SourceLocation loc = e->getExprLoc();
985
987 if (e->getType()->getAs<MemberPointerType>()) {
988 cgm.errorNYI(e->getSourceRange(),
989 "evaluateExprAsBool: member pointer type");
990 return createDummyValue(getLoc(loc), boolTy);
991 }
992
// RAII object keeps the FP options of `e` in effect during emission.
993 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
994 if (!e->getType()->isAnyComplexType())
995 return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);
996
998 loc);
999 }
1000
// Emit an lvalue for the unary operators that can produce one: dereference,
// __real/__imag, and the prefix increment/decrement forms. All other unary
// operator kinds are rvalue-only and hit llvm_unreachable below.
1002 UnaryOperatorKind op = e->getOpcode();
1003
1004 // __extension__ doesn't affect lvalue-ness.
1005 if (op == UO_Extension)
1006 return emitLValue(e->getSubExpr());
1007
1008 switch (op) {
1009 case UO_Deref: {
1011 assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1012
1014 LValueBaseInfo baseInfo;
1015 Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);
1016
1017 // Tag 'load' with deref attribute.
1018 // FIXME: This misses some derefence cases and has problematic interactions
1019 // with other operators.
1020 if (auto loadOp = addr.getDefiningOp<cir::LoadOp>())
1021 loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));
1022
1023 LValue lv = makeAddrLValue(addr, t, baseInfo);
1026 return lv;
1027 }
1028 case UO_Real:
1029 case UO_Imag: {
1030 LValue lv = emitLValue(e->getSubExpr());
1031 assert(lv.isSimple() && "real/imag on non-ordinary l-value");
1032
1033 // __real is valid on scalars. This is a faster way of testing that.
1034 // __imag can only produce an rvalue on scalars.
1035 if (e->getOpcode() == UO_Real &&
1036 !mlir::isa<cir::ComplexType>(lv.getAddress().getElementType())) {
1037 assert(e->getSubExpr()->getType()->isArithmeticType());
1038 return lv;
1039 }
1040
// For a true complex operand, form a pointer to the requested component
// (real or imaginary part) and wrap it in an lvalue of the element type.
1042 QualType elemTy = exprTy->castAs<clang::ComplexType>()->getElementType();
1043 mlir::Location loc = getLoc(e->getExprLoc());
1044 Address component =
1045 e->getOpcode() == UO_Real
1046 ? builder.createComplexRealPtr(loc, lv.getAddress())
1047 : builder.createComplexImagPtr(loc, lv.getAddress());
1049 LValue elemLV = makeAddrLValue(component, elemTy);
// The component lvalue inherits the qualifiers of the complex lvalue.
1050 elemLV.getQuals().addQualifiers(lv.getQuals());
1051 return elemLV;
1052 }
1053 case UO_PreInc:
1054 case UO_PreDec: {
1055 cir::UnaryOpKind kind =
1056 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
1057 LValue lv = emitLValue(e->getSubExpr());
1058
1059 assert(e->isPrefix() && "Prefix operator in unexpected state!");
1060
// Complex and scalar operands take separate inc/dec emission paths.
1061 if (e->getType()->isAnyComplexType()) {
1062 emitComplexPrePostIncDec(e, lv, kind, /*isPre=*/true);
1063 } else {
1064 emitScalarPrePostIncDec(e, lv, kind, /*isPre=*/true);
1065 }
1066
1067 return lv;
1068 }
1069 case UO_Extension:
1070 llvm_unreachable("UnaryOperator extension should be handled above!");
1071 case UO_Plus:
1072 case UO_Minus:
1073 case UO_Not:
1074 case UO_LNot:
1075 case UO_AddrOf:
1076 case UO_PostInc:
1077 case UO_PostDec:
1078 case UO_Coawait:
1079 llvm_unreachable("UnaryOperator of non-lvalue kind!");
1080 }
1081 llvm_unreachable("Unknown unary operator kind!");
1082}
1083
1084/// If the specified expr is a simple decay from an array to pointer,
1085/// return the array subexpression.
1086/// FIXME: this could be abstracted into a common AST helper.
1087static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
1088 // If this isn't just an array->pointer decay, bail out.
1089 const auto *castExpr = dyn_cast<CastExpr>(e);
1090 if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
1091 return nullptr;
1092
1093 // If this is a decay from variable width array, bail out.
1094 const Expr *subExpr = castExpr->getSubExpr();
1095 if (subExpr->getType()->isVariableArrayType())
1096 return nullptr;
1097
1098 return subExpr;
1099}
1100
1101static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
1102 // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr?
1103 if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
1104 return constantOp.getValueAttr<cir::IntAttr>();
1105 return {};
1106}
1107
1108static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
1109 CharUnits eltSize) {
1110 // If we have a constant index, we can use the exact offset of the
1111 // element we're accessing.
1112 if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
1113 const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
1114 return arrayAlign.alignmentAtOffset(offset);
1115 }
1116 // Otherwise, use the worst-case alignment for any element.
1117 return arrayAlign.alignmentOfArrayElement(eltSize);
1118}
1119
// Peel off nested variable-array levels: repeatedly take the element type
// while it is still a VLA, and return the first non-VLA element type found.
1121 const VariableArrayType *vla) {
1122 QualType eltType;
1123 do {
1124 eltType = vla->getElementType();
1125 } while ((vla = astContext.getAsVariableArrayType(eltType)));
1126 return eltType;
1127 }
1128
// Compute the address of an array element. This is a thin wrapper that
// forwards to CIRGenBuilder::getArrayElement; shouldDecay is passed through
// to control array-to-pointer decay of the base.
1130 mlir::Location beginLoc,
1131 mlir::Location endLoc, mlir::Value ptr,
1132 mlir::Type eltTy, mlir::Value idx,
1133 bool shouldDecay) {
1134 CIRGenModule &cgm = cgf.getCIRGenModule();
1135 // TODO(cir): LLVM codegen emits in bound gep check here, is there anything
1136 // that would enhance tracking this later in CIR?
1138 return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
1139 shouldDecay);
1140 }
1141
// Address-based variant: drills through any VLA levels of the element type,
// computes the best element alignment from the base alignment and index,
// then forwards to the value-based overload and rebuilds an Address using
// the memory representation of the element type.
1143 mlir::Location beginLoc,
1144 mlir::Location endLoc, Address addr,
1145 QualType eltType, mlir::Value idx,
1146 mlir::Location loc, bool shouldDecay) {
1147
1148 // Determine the element size of the statically-sized base. This is
1149 // the thing that the indices are expressed in terms of.
1150 if (const VariableArrayType *vla =
1151 cgf.getContext().getAsVariableArrayType(eltType)) {
1152 eltType = getFixedSizeElementType(cgf.getContext(), vla);
1153 }
1154
1155 // We can use that to compute the best alignment of the element.
1156 const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
1157 const CharUnits eltAlign =
1158 getArrayElementAlign(addr.getAlignment(), idx, eltSize);
1159
1161 const mlir::Value eltPtr =
1162 emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
1163 addr.getElementType(), idx, shouldDecay);
1164 const mlir::Type elementType = cgf.convertTypeForMem(eltType);
1165 return Address(eltPtr, elementType, eltAlign);
1166 }
1167
// Emit an lvalue for an array subscript expression, handling (in order):
// ObjC object types (NYI), vector-element subscripts, HLSL (NYI), ext-vector
// bases, VLA bases, simple array-decay bases, and finally plain pointer
// bases with an estimated alignment.
1168 LValue
1170 if (e->getType()->getAs<ObjCObjectType>()) {
1171 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
1173 }
1174
1175 // The index must always be an integer, which is not an aggregate. Emit it
1176 // in lexical order (this complexity is, sadly, required by C++17).
1177 assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
1178 "index was neither LHS nor RHS");
1179
// Helper: emit the index after the base, optionally promoting its type
// (the promotion cast itself is still NYI).
1180 auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
1181 const mlir::Value idx = emitScalarExpr(e->getIdx());
1182
1183 // Extend or truncate the index type to 32 or 64-bits.
1184 auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
1185 if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
1186 cgm.errorNYI(e->getSourceRange(),
1187 "emitArraySubscriptExpr: index type cast");
1188 return idx;
1189 };
1190
1191 // If the base is a vector type, then we are forming a vector element
1192 // with this subscript.
1193 if (e->getBase()->getType()->isSubscriptableVectorType() &&
1195 const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
1196 const LValue lv = emitLValue(e->getBase());
1197 return LValue::makeVectorElt(lv.getAddress(), idx, e->getBase()->getType(),
1198 lv.getBaseInfo());
1199 }
1200
1201 // The HLSL runtime handles subscript expressions on global resource arrays
1202 // and objects with HLSL buffer layouts.
1203 if (getLangOpts().HLSL) {
1204 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: HLSL");
1205 return {};
1206 }
1207
1208 mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
1209
1210 // Handle the extvector case we ignored above.
1212 const LValue lv = emitLValue(e->getBase());
1213 Address addr = emitExtVectorElementLValue(lv, cgm.getLoc(e->getExprLoc()));
1214
1215 QualType elementType = lv.getType()->castAs<VectorType>()->getElementType();
1216 addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
1217 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
1218 idx, cgm.getLoc(e->getExprLoc()),
1219 /*shouldDecay=*/false);
1220
1221 return makeAddrLValue(addr, elementType, lv.getBaseInfo());
1222 }
1223
1224 if (const VariableArrayType *vla =
1225 getContext().getAsVariableArrayType(e->getType())) {
1226 // The base must be a pointer, which is not an aggregate. Emit
1227 // it. It needs to be emitted first in case it's what captures
1228 // the VLA bounds.
1230
1231 // The element count here is the total number of non-VLA elements.
1232 mlir::Value numElements = getVLASize(vla).numElts;
1233 idx = builder.createIntCast(idx, numElements.getType());
1234
1235 // Effectively, the multiply by the VLA size is part of the GEP.
1236 // GEP indexes are signed, and scaling an index isn't permitted to
1237 // signed-overflow, so we use the same semantics for our explicit
1238 // multiply. We suppress this if overflow is not undefined behavior.
1239 OverflowBehavior overflowBehavior = getLangOpts().PointerOverflowDefined
1242 idx = builder.createMul(cgm.getLoc(e->getExprLoc()), idx, numElements,
1243 overflowBehavior);
1244
1245 addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
1246 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
1247 idx, cgm.getLoc(e->getExprLoc()),
1248 /*shouldDecay=*/false);
1249
1250 return makeAddrLValue(addr, vla->getElementType(), LValueBaseInfo());
1251 }
1252
// Simple array-to-pointer decay: index directly off the array lvalue so its
// alignment information is preserved.
1253 if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
1254 LValue arrayLV;
1255 if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
1256 arrayLV = emitArraySubscriptExpr(ase);
1257 else
1258 arrayLV = emitLValue(array);
1259
1260 // Propagate the alignment from the array itself to the result.
1261 const Address addr = emitArraySubscriptPtr(
1262 *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
1263 arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1264 /*shouldDecay=*/true);
1265
1266 const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
1267
1268 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1269 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1270 }
1271
1272 return lv;
1273 }
1274
1275 // The base must be a pointer; emit it with an estimate of its alignment.
1276 assert(e->getBase()->getType()->isPointerType() &&
1277 "The base must be a pointer");
1278
1279 LValueBaseInfo eltBaseInfo;
1280 const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
1281 // Propagate the alignment from the array itself to the result.
1282 const Address addxr = emitArraySubscriptPtr(
1283 *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
1284 e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1285 /*shouldDecay=*/false);
1286
1287 const LValue lv = LValue::makeAddr(addxr, e->getType(), eltBaseInfo);
1288
1289 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1290 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1291 }
1292
1293 return lv;
1294}
1295
// Emit an lvalue for an ext-vector element access (e.g. v.xyzw). The base
// may be a pointer to a vector (arrow form), a vector glvalue, or a vector
// rvalue that is first spilled to a temporary so an address exists.
1297 // Emit the base vector as an l-value.
1298 LValue base;
1299
1300 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
1301 if (e->isArrow()) {
1302 // If it is a pointer to a vector, emit the address and form an lvalue with
1303 // it.
1304 LValueBaseInfo baseInfo;
1305 Address ptr = emitPointerWithAlignment(e->getBase(), &baseInfo);
1307 base = makeAddrLValue(ptr, clangPtrTy->getPointeeType(), baseInfo);
1309 base.getQuals().removeObjCGCAttr();
1310 } else if (e->getBase()->isGLValue()) {
1311 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
1312 // emit the base as an lvalue.
1313 assert(e->getBase()->getType()->isVectorType());
1314 base = emitLValue(e->getBase());
1315 } else {
1316 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
1317 assert(e->getBase()->getType()->isVectorType() &&
1318 "Result must be a vector");
1319 mlir::Value vec = emitScalarExpr(e->getBase());
1320
1321 // Store the vector to memory (because LValue wants an address).
1322 QualType baseTy = e->getBase()->getType();
1323 Address vecMem = createMemTemp(baseTy, vec.getLoc(), "tmp");
1324 if (!getLangOpts().HLSL && baseTy->isExtVectorBoolType()) {
1325 cgm.errorNYI(e->getSourceRange(),
1326 "emitExtVectorElementExpr: ExtVectorBoolType & !HLSL");
1327 return {};
1328 }
1329 builder.createStore(vec.getLoc(), vec, vecMem);
1330 base = makeAddrLValue(vecMem, baseTy, AlignmentSource::Decl);
1331 }
1332
1333 QualType type =
1335
1336 // Encode the element access list into a vector of unsigned indices.
1338 e->getEncodedElementAccess(indices);
1339
// A simple base lvalue becomes an ext-vector-element lvalue carrying the
// encoded indices as an I64 array attribute.
1340 if (base.isSimple()) {
1341 SmallVector<int64_t> attrElts(indices.begin(), indices.end());
1342 mlir::ArrayAttr elts = builder.getI64ArrayAttr(attrElts);
1343 return LValue::makeExtVectorElt(base.getAddress(), elts, type,
1344 base.getBaseInfo());
1345 }
1346
1347 cgm.errorNYI(e->getSourceRange(),
1348 "emitExtVectorElementExpr: isSimple is false");
1349 return {};
1350}
1351
// Emit an lvalue referring to the global created for a string literal; the
// alignment recorded on the global becomes the lvalue's alignment.
1353 llvm::StringRef name) {
1354 cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e, name);
1355 assert(globalOp.getAlignment() && "expected alignment for string literal");
1356 unsigned align = *(globalOp.getAlignment());
1357 mlir::Value addr =
1358 builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
1359 return makeAddrLValue(
1360 Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
1362}
1363
1364/// Casts are never lvalues unless that cast is to a reference type. If the cast
1365/// is to a reference, we can have the usual lvalue result, otherwise if a cast
1366/// is needed by the code generator in an lvalue context, then it must mean that
1367/// we need the address of an aggregate in order to access one of its members.
1368/// This can happen for all the reasons that casts are permitted with aggregate
1369/// result, including noop aggregate casts, and cast from scalar to union.
// The first group of cast kinds below can never appear in an lvalue context
// and hits llvm_unreachable; several lvalue-capable kinds are still NYI.
1371 switch (e->getCastKind()) {
1372 case CK_ToVoid:
1373 case CK_BitCast:
1374 case CK_LValueToRValueBitCast:
1375 case CK_ArrayToPointerDecay:
1376 case CK_FunctionToPointerDecay:
1377 case CK_NullToMemberPointer:
1378 case CK_NullToPointer:
1379 case CK_IntegralToPointer:
1380 case CK_PointerToIntegral:
1381 case CK_PointerToBoolean:
1382 case CK_IntegralCast:
1383 case CK_BooleanToSignedIntegral:
1384 case CK_IntegralToBoolean:
1385 case CK_IntegralToFloating:
1386 case CK_FloatingToIntegral:
1387 case CK_FloatingToBoolean:
1388 case CK_FloatingCast:
1389 case CK_FloatingRealToComplex:
1390 case CK_FloatingComplexToReal:
1391 case CK_FloatingComplexToBoolean:
1392 case CK_FloatingComplexCast:
1393 case CK_FloatingComplexToIntegralComplex:
1394 case CK_IntegralRealToComplex:
1395 case CK_IntegralComplexToReal:
1396 case CK_IntegralComplexToBoolean:
1397 case CK_IntegralComplexCast:
1398 case CK_IntegralComplexToFloatingComplex:
1399 case CK_DerivedToBaseMemberPointer:
1400 case CK_BaseToDerivedMemberPointer:
1401 case CK_MemberPointerToBoolean:
1402 case CK_ReinterpretMemberPointer:
1403 case CK_AnyPointerToBlockPointerCast:
1404 case CK_ARCProduceObject:
1405 case CK_ARCConsumeObject:
1406 case CK_ARCReclaimReturnedObject:
1407 case CK_ARCExtendBlockObject:
1408 case CK_CopyAndAutoreleaseBlockObject:
1409 case CK_IntToOCLSampler:
1410 case CK_FloatingToFixedPoint:
1411 case CK_FixedPointToFloating:
1412 case CK_FixedPointCast:
1413 case CK_FixedPointToBoolean:
1414 case CK_FixedPointToIntegral:
1415 case CK_IntegralToFixedPoint:
1416 case CK_MatrixCast:
1417 case CK_HLSLVectorTruncation:
1418 case CK_HLSLMatrixTruncation:
1419 case CK_HLSLArrayRValue:
1420 case CK_HLSLElementwiseCast:
1421 case CK_HLSLAggregateSplatCast:
1422 llvm_unreachable("unexpected cast lvalue");
1423
1424 case CK_Dependent:
1425 llvm_unreachable("dependent cast kind in IR gen!");
1426
1427 case CK_BuiltinFnToFnPtr:
1428 llvm_unreachable("builtin functions are handled elsewhere");
1429
// Dynamic casts: emit the operand lvalue, then apply the dynamic cast to
// its address.
1430 case CK_Dynamic: {
1431 LValue lv = emitLValue(e->getSubExpr());
1432 Address v = lv.getAddress();
1433 const auto *dce = cast<CXXDynamicCastExpr>(e);
1435 }
1436
1437 // These are never l-values; just use the aggregate emission code.
1438 case CK_NonAtomicToAtomic:
1439 case CK_AtomicToNonAtomic:
1440 case CK_ToUnion:
1441 case CK_ObjCObjectLValueCast:
1442 case CK_VectorSplat:
1443 case CK_ConstructorConversion:
1444 case CK_UserDefinedConversion:
1445 case CK_CPointerToObjCPointerCast:
1446 case CK_BlockPointerToObjCPointerCast:
1447 case CK_LValueToRValue: {
1448 cgm.errorNYI(e->getSourceRange(),
1449 std::string("emitCastLValue for unhandled cast kind: ") +
1450 e->getCastKindName());
1451
1452 return {};
1453 }
// Address-space conversion: cast the operand's pointer to the destination
// address space (source must be a target address space; others are NYI).
1454 case CK_AddressSpaceConversion: {
1455 LValue lv = emitLValue(e->getSubExpr());
1456 QualType destTy = getContext().getPointerType(e->getType());
1457
1458 clang::LangAS srcLangAS = e->getSubExpr()->getType().getAddressSpace();
1459 mlir::ptr::MemorySpaceAttrInterface srcAS;
1460 if (clang::isTargetAddressSpace(srcLangAS))
1461 srcAS = cir::toCIRAddressSpaceAttr(getMLIRContext(), srcLangAS);
1462 else
1463 cgm.errorNYI(
1464 e->getSourceRange(),
1465 "emitCastLValue: address space conversion from unknown address "
1466 "space");
1467
1468 mlir::Value v = performAddrSpaceCast(lv.getPointer(), convertType(destTy));
1469
1471 lv.getAddress().getAlignment()),
1472 e->getType(), lv.getBaseInfo());
1473 }
1474
1475 case CK_LValueBitCast: {
1476 // This must be a reinterpret_cast (or c-style equivalent).
1477 const auto *ce = cast<ExplicitCastExpr>(e);
1478
1479 cgm.emitExplicitCastExprType(ce, this);
1480 LValue LV = emitLValue(e->getSubExpr());
1482 builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
1483
1484 return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
1485 }
1486
1487 case CK_NoOp: {
1488 // CK_NoOp can model a qualification conversion, which can remove an array
1489 // bound and change the IR type.
1490 LValue lv = emitLValue(e->getSubExpr());
1491 // Propagate the volatile qualifier to LValue, if exists in e.
1493 lv.getQuals() = e->getType().getQualifiers();
1494 if (lv.isSimple()) {
1495 Address v = lv.getAddress();
1496 if (v.isValid()) {
1497 mlir::Type ty = convertTypeForMem(e->getType());
1498 if (v.getElementType() != ty)
1499 cgm.errorNYI(e->getSourceRange(),
1500 "emitCastLValue: NoOp needs bitcast");
1501 }
1502 }
1503 return lv;
1504 }
1505
1506 case CK_UncheckedDerivedToBase:
1507 case CK_DerivedToBase: {
1508 auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();
1509
1510 LValue lv = emitLValue(e->getSubExpr());
1511 Address thisAddr = lv.getAddress();
1512
1513 // Perform the derived-to-base conversion
1514 Address baseAddr =
1515 getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
1516 /*NullCheckValue=*/false, e->getExprLoc());
1517
1518 // TODO: Support accesses to members of base classes in TBAA. For now, we
1519 // conservatively pretend that the complete object is of the base class
1520 // type.
1522 return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
1523 }
1524
1525 case CK_BaseToDerived: {
1526 const auto *derivedClassDecl = e->getType()->castAsCXXRecordDecl();
1527 LValue lv = emitLValue(e->getSubExpr());
1528
1529 // Perform the base-to-derived conversion
1531 getLoc(e->getSourceRange()), lv.getAddress(), derivedClassDecl,
1532 e->path(), /*NullCheckValue=*/false);
1533 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
1534 // performed and the object is not of the derived type.
1536
1538 return makeAddrLValue(derived, e->getType(), lv.getBaseInfo());
1539 }
1540
1541 case CK_ZeroToOCLOpaqueType:
1542 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
1543 }
1544
1545 llvm_unreachable("Invalid cast kind");
1546}
1547
// If the member expression refers to a VarDecl (e.g. a static data member),
// rewrite it as a plain DeclRefExpr; otherwise return null.
1549 const MemberExpr *me) {
1550 if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
1551 // Try to emit static variable member expressions as DREs.
1552 return DeclRefExpr::Create(
1554 /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
1555 me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
1556 }
1557 return nullptr;
1558}
1559
// Emit an lvalue for a member expression. Static-member accesses are
// rewritten to DeclRefExprs first; otherwise the base is emitted (as a
// scalar for '->', as an lvalue for '.') and the field lvalue is formed.
1561 if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
1563 return emitDeclRefLValue(dre);
1564 }
1565
1566 Expr *baseExpr = e->getBase();
1567 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
1568 LValue baseLV;
1569 if (e->isArrow()) {
1570 LValueBaseInfo baseInfo;
1572 Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
1573 QualType ptrTy = baseExpr->getType()->getPointeeType();
1575 baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
1576 } else {
1578 baseLV = emitLValue(baseExpr);
1579 }
1580
1581 const NamedDecl *nd = e->getMemberDecl();
1582 if (auto *field = dyn_cast<FieldDecl>(nd)) {
1583 LValue lv = emitLValueForField(baseLV, field);
1585 if (getLangOpts().OpenMP) {
1586 // If the member was explicitly marked as nontemporal, mark it as
1587 // nontemporal. If the base lvalue is marked as nontemporal, mark access
1588 // to children as nontemporal too.
1589 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
1590 }
1591 return lv;
1592 }
1593
// Member function references are not handled yet.
1594 if (isa<FunctionDecl>(nd)) {
1595 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: FunctionDecl");
1596 return LValue();
1597 }
1598
1599 llvm_unreachable("Unhandled member declaration!");
1600}
1601
1602/// Evaluate an expression into a given memory location.
// Dispatches on the CIR evaluation kind of the expression's type: complex
// values are emitted into the lvalue, aggregates into an AggValueSlot, and
// scalars are emitted then stored through the lvalue.
1604 Qualifiers quals, bool isInit) {
1605 // FIXME: This function should take an LValue as an argument.
1606 switch (getEvaluationKind(e->getType())) {
1607 case cir::TEK_Complex: {
1608 LValue lv = makeAddrLValue(location, e->getType());
1609 emitComplexExprIntoLValue(e, lv, isInit);
1610 return;
1611 }
1612
1613 case cir::TEK_Aggregate: {
1614 emitAggExpr(e, AggValueSlot::forAddr(location, quals,
1618 return;
1619 }
1620
1621 case cir::TEK_Scalar: {
1623 LValue lv = makeAddrLValue(location, e->getType());
1624 emitStoreThroughLValue(rv, lv);
1625 return;
1626 }
1627 }
1628
1629 llvm_unreachable("bad evaluation kind");
1630}
1631
// Create storage for a materialized temporary. Full-expression/automatic
// temporaries get an alloca (placed in the scope of the extending
// declaration's alloca when one exists); static/thread temporaries use a
// global; dynamic storage duration is impossible for temporaries.
1632 const MaterializeTemporaryExpr *m,
1633 const Expr *inner) {
1634 // TODO(cir): cgf.getTargetHooks();
1635 switch (m->getStorageDuration()) {
1636 case SD_FullExpression:
1637 case SD_Automatic: {
1638 QualType ty = inner->getType();
1639
1641
1642 // The temporary memory should be created in the same scope as the extending
1643 // declaration of the temporary materialization expression.
1644 cir::AllocaOp extDeclAlloca;
1645 if (const ValueDecl *extDecl = m->getExtendingDecl()) {
1646 auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
1647 if (extDeclAddrIter != cgf.localDeclMap.end())
1648 extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
1649 }
// If the extending declaration has an alloca, insert the temporary's
// alloca right at that position so their lifetimes align.
1650 mlir::OpBuilder::InsertPoint ip;
1651 if (extDeclAlloca)
1652 ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
1653 return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
1654 cgf.getCounterRefTmpAsString(), /*alloca=*/nullptr,
1655 ip);
1656 }
1657 case SD_Thread:
1658 case SD_Static: {
1659 auto addr =
1660 mlir::cast<cir::GlobalOp>(cgf.cgm.getAddrOfGlobalTemporary(m, inner));
1661 auto getGlobal = cgf.cgm.getBuilder().createGetGlobal(addr);
1662 assert(addr.getAlignment().has_value() &&
1663 "This should always have an alignment");
1664 return Address(getGlobal,
1665 clang::CharUnits::fromQuantity(addr.getAlignment().value()));
1666 }
1667
1668 case SD_Dynamic:
1669 llvm_unreachable("temporary can't have dynamic storage duration");
1670 }
1671 llvm_unreachable("unknown storage duration");
1672}
1674
// Register destruction of a materialized temporary according to its storage
// duration. Types without a destruction kind need nothing; full-expression
// temporaries push a normal+EH destroy cleanup; ObjC-lifetime, automatic,
// and static/thread destructor cases are still NYI.
1675 const MaterializeTemporaryExpr *m,
1676 const Expr *e, Address referenceTemporary) {
1677 // Objective-C++ ARC:
1678 // If we are binding a reference to a temporary that has ownership, we
1679 // need to perform retain/release operations on the temporary.
1680 //
1681 // FIXME(ogcg): This should be looking at e, not m.
1682 if (m->getType().getObjCLifetime()) {
1683 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
1684 return;
1685 }
1686
1688 if (dk == QualType::DK_none)
1689 return;
1690
1691 switch (m->getStorageDuration()) {
1692 case SD_Static:
1693 case SD_Thread: {
1694 CXXDestructorDecl *referenceTemporaryDtor = nullptr;
1696 if (const auto *classDecl =
1697 classDecl && !classDecl->hasTrivialDestructor())
1698 // Get the destructor for the reference temporary.
1699 referenceTemporaryDtor = classDecl->getDestructor();
1700
// No non-trivial destructor means there is nothing to clean up.
1701 if (!referenceTemporaryDtor)
1702 return;
1703
1704 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
1705 "storage duration with destructors");
1706 break;
1707 }
1708
1709 case SD_FullExpression:
1710 cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
1712 break;
1713
1714 case SD_Automatic:
1715 cgf.cgm.errorNYI(e->getSourceRange(),
1716 "pushTemporaryCleanup: automatic storage duration");
1717 break;
1718
1719 case SD_Dynamic:
1720 llvm_unreachable("temporary cannot have dynamic storage duration");
1721 }
1722}
1724
// Emit an lvalue for a materialized temporary: skip rvalue subobject
// adjustments, create and initialize the temporary storage, register its
// cleanup, and return the result as a Decl-aligned lvalue. Several paths
// (ObjC lifetimes, opaque values, globals, adjustments) are still NYI.
1725 const MaterializeTemporaryExpr *m) {
1726 const Expr *e = m->getSubExpr();
1727
1728 assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
1729 !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
1730 "Reference should never be pseudo-strong!");
1731
1732 // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so
1733 // as that will cause the lifetime adjustment to be lost for ARC
1734 auto ownership = m->getType().getObjCLifetime();
1735 if (ownership != Qualifiers::OCL_None &&
1736 ownership != Qualifiers::OCL_ExplicitNone) {
1737 cgm.errorNYI(e->getSourceRange(),
1738 "emitMaterializeTemporaryExpr: ObjCLifetime");
1739 return {};
1740 }
1741
1745 e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);
1746
// Any comma-expression LHSs stripped above still need to be evaluated for
// their side effects.
1747 for (const Expr *ignored : commaLHSs)
1748 emitIgnoredExpr(ignored);
1749
1750 if (isa<OpaqueValueExpr>(e)) {
1751 cgm.errorNYI(e->getSourceRange(),
1752 "emitMaterializeTemporaryExpr: OpaqueValueExpr");
1753 return {};
1754 }
1755
1756 // Create and initialize the reference temporary.
1757 Address object = createReferenceTemporary(*this, m, e);
1758
1759 if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
1760 // TODO(cir): add something akin to stripPointerCasts() to ptr above
1761 cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
1762 return {};
1763 } else {
1765 emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
1766 }
1767 pushTemporaryCleanup(*this, m, e, object);
1768
1769 // Perform derived-to-base casts and/or field accesses, to get from the
1770 // temporary object we created (and, potentially, for which we extended
1771 // the lifetime) to the subobject we're binding the reference to.
1772 if (!adjustments.empty()) {
1773 cgm.errorNYI(e->getSourceRange(),
1774 "emitMaterializeTemporaryExpr: Adjustments");
1775 return {};
1776 }
1777
1778 return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
1779}
1780
// Return the lvalue previously recorded for an OpaqueValueExpr; for a
// unique OVE with no mapping, emit its source expression directly.
1781 LValue
1784
1785 auto it = opaqueLValues.find(e);
1786 if (it != opaqueLValues.end())
1787 return it->second;
1788
1789 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
1790 return emitLValue(e->getSourceExpr());
1791}
1792
// RValue counterpart of the lvalue mapping above: look up the recorded
// rvalue, or emit the unique OVE's source expression.
1793 RValue
1796
1797 auto it = opaqueRValues.find(e);
1798 if (it != opaqueRValues.end())
1799 return it->second;
1800
1801 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
1802 return emitAnyExpr(e->getSourceExpr());
1803}
1804
// Emit an lvalue for a block-scope compound literal: allocate a temporary,
// evaluate the initializer into it, and return the temporary's lvalue.
// File-scope literals and C-mode destruction are still NYI.
1806 if (e->isFileScope()) {
1807 cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
1808 return {};
1809 }
1810
1811 if (e->getType()->isVariablyModifiedType())
1813
1814 Address declPtr = createMemTemp(e->getType(), getLoc(e->getSourceRange()),
1815 ".compoundliteral");
1816 const Expr *initExpr = e->getInitializer();
1817 LValue result = makeAddrLValue(declPtr, e->getType(), AlignmentSource::Decl);
1818
1819 emitAnyExprToMem(initExpr, declPtr, e->getType().getQualifiers(),
1820 /*Init*/ true);
1821
1822 // Block-scope compound literals are destroyed at the end of the enclosing
1823 // scope in C.
1824 if (!getLangOpts().CPlusPlus && e->getType().isDestructedType()) {
1825 cgm.errorNYI(e->getSourceRange(),
1826 "emitCompoundLiteralLValue: non C++ DestructedType");
1827 return {};
1828 }
1829
1830 return result;
1831}
1832
// Emit a call expression used as an lvalue. Only calls with a scalar result
// are handled so far, and a scalar result is only valid here when the call
// returns a reference type.
1834 RValue rv = emitCallExpr(e);
1835
1836 if (!rv.isScalar()) {
1837 cgm.errorNYI(e->getSourceRange(), "emitCallExprLValue: non-scalar return");
1838 return {};
1839 }
1840
1841 assert(e->getCallReturnType(getContext())->isReferenceType() &&
1842 "Can't have a scalar return unless the return type is a "
1843 "reference type!");
1844
1846}
1847
// Emit a binary operator that yields an lvalue. Comma emits its LHS for
// side effects and recurses on the RHS; otherwise only assignment is
// expected, and its emission is dispatched by CIR evaluation kind
// (aggregates are still NYI).
1849 // Comma expressions just emit their LHS then their RHS as an l-value.
1850 if (e->getOpcode() == BO_Comma) {
1851 emitIgnoredExpr(e->getLHS());
1852 return emitLValue(e->getRHS());
1853 }
1854
1855 if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI)
1857
1858 assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");
1859
1860 // Note that in all of these cases, __block variables need the RHS
1861 // evaluated first just in case the variable gets moved by the RHS.
1862
1864 case cir::TEK_Scalar: {
1866 if (e->getLHS()->getType().getObjCLifetime() !=
1868 cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
1869 return {};
1870 }
1871
// RHS is evaluated before the LHS lvalue (see the __block note above).
1872 RValue rv = emitAnyExpr(e->getRHS());
1873 LValue lv = emitLValue(e->getLHS());
1874
1875 SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
1876 if (lv.isBitField())
1878 else
1879 emitStoreThroughLValue(rv, lv);
1880
1881 if (getLangOpts().OpenMP) {
1882 cgm.errorNYI(e->getSourceRange(), "openmp");
1883 return {};
1884 }
1885
1886 return lv;
1887 }
1888
1889 case cir::TEK_Complex: {
1891 }
1892
1893 case cir::TEK_Aggregate:
1894 cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
1895 return {};
1896 }
1897 llvm_unreachable("bad evaluation kind");
1898}
1899
1900/// Emit code to compute the specified expression which
1901/// can have any type. The result is returned as an RValue struct.
// Dispatches on the CIR evaluation kind; an aggregate result gets a fresh
// temporary slot when the caller did not supply one and the result is used.
1903 bool ignoreResult) {
1905 case cir::TEK_Scalar:
1906 return RValue::get(emitScalarExpr(e, ignoreResult));
1907 case cir::TEK_Complex:
1909 case cir::TEK_Aggregate: {
1910 if (!ignoreResult && aggSlot.isIgnored())
1911 aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
1913 emitAggExpr(e, aggSlot);
1914 return aggSlot.asRValue();
1915 }
1916 }
1917 llvm_unreachable("bad evaluation kind");
1918}
1919
1920// Detect the unusual situation where an inline version is shadowed by a
1921// non-inline version. In that case we should pick the external one
1922// everywhere. That's GCC behavior too.
// Returns true only if every redeclaration in the chain is an inline
// builtin declaration.
1924 for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
1925 if (!pd->isInlineBuiltinDeclaration())
1926 return false;
1927 return true;
1928}
1929
// Resolve a direct callee for the GlobalDecl `gd`. Builtins may be emitted as
// builtin calls or routed through a ".inline" clone when an inline builtin
// definition shadows the library builtin; CUDA kernels called from the host
// are redirected to their device stub.
1930CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
1931 const auto *fd = cast<FunctionDecl>(gd.getDecl());
1932
1933 if (unsigned builtinID = fd->getBuiltinID()) {
1934 StringRef ident = cgm.getMangledName(gd);
1935 std::string fdInlineName = (ident + ".inline").str();
1936
1937 bool isPredefinedLibFunction =
1938 cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
1939 // TODO: Read no-builtin function attribute and set this accordingly.
1940 // Using false here matches OGCG's default behavior - builtins are called
1941 // as builtins unless explicitly disabled. The previous value of true was
1942 // overly conservative and caused functions to be marked as no_inline when
1943 // they shouldn't be.
1944 bool hasAttributeNoBuiltin = false;
1946
1947 // When directly calling an inline builtin, call it through its mangled
1948 // name to make it clear it's not the actual builtin.
1949 auto fn = cast<cir::FuncOp>(curFn);
1950 if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
1951 cir::FuncOp clone =
1952 mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
1953
1954 if (!clone) {
1955 // Create a forward declaration - the body will be generated in
1956 // generateCode when the function definition is processed
1957 cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
1958 mlir::OpBuilder::InsertionGuard guard(builder);
1959 builder.setInsertionPointToStart(cgm.getModule().getBody());
1960
1961 clone = cir::FuncOp::create(builder, calleeFunc.getLoc(), fdInlineName,
1962 calleeFunc.getFunctionType());
1963 clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
1964 &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
1965 clone.setSymVisibility("private");
1966 clone.setInlineKind(cir::InlineKind::AlwaysInline);
1967 }
1968 return CIRGenCallee::forDirect(clone, gd);
1969 }
1970
1971 // Replaceable builtins provide their own implementation of a builtin. If we
1972 // are in an inline builtin implementation, avoid trivial infinite
1973 // recursion. Honor __attribute__((no_builtin("foo"))) or
1974 // __attribute__((no_builtin)) on the current function unless foo is
1975 // not a predefined library function which means we must generate the
1976 // builtin no matter what.
1977 else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
1978 return CIRGenCallee::forBuiltin(builtinID, fd);
1979 }
1980
1981 cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);
1982
// Host-side calls to a CUDA/HIP kernel must go through the kernel's stub
// rather than the device function itself.
1983 if ((cgm.getLangOpts().CUDA || cgm.getLangOpts().HIP) &&
1984 !cgm.getLangOpts().CUDAIsDevice && fd->hasAttr<CUDAGlobalAttr>()) {
1985 mlir::Operation *handle = cgm.getCUDARuntime().getKernelHandle(callee, gd);
1986 callee =
1987 mlir::cast<cir::FuncOp>(*cgm.getCUDARuntime().getKernelStub(handle));
1988 }
1989
1990 return CIRGenCallee::forDirect(callee, gd);
1991}
1992
// Produce a placeholder "undef" RValue for the given type. Only void is
// handled so far; other types report NYI but still return a null RValue so
// emission can continue. (Signature line missing in this listing - presumably
// the undef-rvalue helper taking a QualType; confirm against the full source.)
1994 if (ty->isVoidType())
1995 return RValue::get(nullptr);
1996
1997 cgm.errorNYI("unsupported type for undef rvalue");
1998 return RValue::get(nullptr);
1999}
2000
// Emit a call through a callee of function-pointer type: canonicalize the
// callee type, collect the arguments, arrange the free-function call info and
// dispatch to the low-level emitCall. (The first signature line and a few
// assert/prepare lines are missing from this listing - confirm against the
// full source.)
2002 const CIRGenCallee &origCallee,
2003 const clang::CallExpr *e,
2005 // Get the actual function type. The callee type will always be a pointer to
2006 // function type or a block pointer type.
2007 assert(calleeTy->isFunctionPointerType() &&
2008 "Callee must have function pointer type!");
2009
2010 calleeTy = getContext().getCanonicalType(calleeTy);
2011 auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
2012
2013 CIRGenCallee callee = origCallee;
2014
2015 if (getLangOpts().CPlusPlus)
2017
2018 const auto *fnType = cast<FunctionType>(pointeeTy);
2019
2021
2022 CallArgList args;
2024
2025 emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
2026 e->getDirectCallee());
2027
2028 const CIRGenFunctionInfo &funcInfo =
2029 cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
2030
2031 // C99 6.5.2.2p6:
2032 // If the expression that denotes the called function has a type that does
2033 // not include a prototype, [the default argument promotions are performed].
2034 // If the number of arguments does not equal the number of parameters, the
2035 // behavior is undefined. If the function is defined with a type that
2036 // includes a prototype, and either the prototype ends with an ellipsis (,
2037 // ...) or the types of the arguments after promotion are not compatible
2038 // with the types of the parameters, the behavior is undefined. If the
2039 // function is defined with a type that does not include a prototype, and
2040 // the types of the arguments after promotion are not compatible with those
2041 // of the parameters after promotion, the behavior is undefined [except in
2042 // some trivial cases].
2043 // That is, in the general case, we should assume that a call through an
2044 // unprototyped function type works like a *non-variadic* call. The way we
2045 // make this work is to cast to the exact type of the promoted arguments.
2046 if (isa<FunctionNoProtoType>(fnType)) {
2049 cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
2050 // get non-variadic function type
2051 calleeTy = cir::FuncType::get(calleeTy.getInputs(),
2052 calleeTy.getReturnType(), false);
2053 auto calleePtrTy = cir::PointerType::get(calleeTy);
2054
// Direct FuncOp callees have no SSA result; take their address explicitly so
// the bitcast below has a value to operate on.
2055 mlir::Operation *fn = callee.getFunctionPointer();
2056 mlir::Value addr;
2057 if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
2058 addr = cir::GetGlobalOp::create(
2059 builder, getLoc(e->getSourceRange()),
2060 cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
2061 } else {
2062 addr = fn->getResult(0);
2063 }
2064
2065 fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
2066 callee.setFunctionPointer(fn);
2067 }
2068
2070 assert(!cir::MissingFeatures::hip());
2072
2073 cir::CIRCallOpInterface callOp;
2074 RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
2075 getLoc(e->getExprLoc()));
2076
2078
2079 return callResult;
2080}
2081
// Resolve the callee of a call expression: direct function references and
// member-function references become direct callees; everything else is an
// indirect call through a function pointer value. (Signature line missing in
// this listing.)
2083 e = e->IgnoreParens();
2084
2085 // Look through function-to-pointer decay.
2086 if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
2087 if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
2088 implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
2089 return emitCallee(implicitCast->getSubExpr());
2090 }
2091 // When performing an indirect call through a function pointer lvalue, the
2092 // function pointer lvalue is implicitly converted to an rvalue through an
2093 // lvalue-to-rvalue conversion.
2094 assert(implicitCast->getCastKind() == CK_LValueToRValue &&
2095 "unexpected implicit cast on function pointers");
2096 } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
2097 // Resolve direct calls.
2098 const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
2099 return emitDirectCallee(funcDecl);
2100 } else if (auto me = dyn_cast<MemberExpr>(e)) {
2101 if (const auto *fd = dyn_cast<FunctionDecl>(me->getMemberDecl())) {
// The base is evaluated for side effects only; the call target is direct.
2102 emitIgnoredExpr(me->getBase());
2103 return emitDirectCallee(fd);
2104 }
2105 // Else fall through to the indirect reference handling below.
2106 } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
2108 }
2109
2110 // Otherwise, we have an indirect reference.
2111 mlir::Value calleePtr;
2113 if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
2114 calleePtr = emitScalarExpr(e);
2115 functionType = ptrType->getPointeeType();
2116 } else {
2117 functionType = e->getType();
2118 calleePtr = emitLValue(e).getPointer();
2119 }
2120 assert(functionType->isFunctionType());
2121
// Remember the referenced VarDecl (if any) so later diagnostics/attributes
// can be associated with the declaration being called through.
2122 GlobalDecl gd;
2123 if (const auto *vd =
2124 dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
2125 gd = GlobalDecl(vd);
2126
2127 CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
2128 CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
2129 return callee;
2130}
2131
2135
// Dispatch a CallExpr to the appropriate specialized emitter (member calls,
// CUDA kernel calls, operator calls), then resolve the callee and emit the
// call. (Signature and several return lines are missing from this listing.)
2136 if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
2138
2139 if (const auto *cudaKernelCallExpr = dyn_cast<CUDAKernelCallExpr>(e))
2141
2142 if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
2143 // If the callee decl is a CXXMethodDecl, we need to emit this as a C++
2144 // operator member call.
2145 if (const CXXMethodDecl *md =
2146 dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
2147 return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
2148 // A CXXOperatorCallExpr is created even for explicit object methods, but
2149 // these should be treated like static function calls. Fall through to do
2150 // that.
2151 }
2152
2153 CIRGenCallee callee = emitCallee(e->getCallee());
2154
2155 if (callee.isBuiltin())
2156 return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
2157 returnValue);
2158
2159 if (callee.isPseudoDestructor())
2161
2162 return emitCall(e->getCallee()->getType(), callee, e, returnValue);
2163}
2164
2165/// Emit code to compute the specified expression, ignoring the result.
// (Signature line missing in this listing.)
// prvalues can be emitted directly into an ignored aggregate slot.
2167 if (e->isPRValue()) {
2168 emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
2169 return;
2170 }
2171
2172 // Just emit it as an l-value and drop the result.
2173 emitLValue(e);
2174}
2175
// Emit the array-to-pointer decay of expression `e`: emit the array lvalue
// and produce the address of its first element. (The first signature line and
// the element-type computation lines are missing from this listing - the
// `eltType` used below is presumably derived from e's array element type;
// confirm against the full source.)
2177 LValueBaseInfo *baseInfo) {
2179 assert(e->getType()->isArrayType() &&
2180 "Array to pointer decay must have array source type!");
2181
2182 // Expressions of array type can't be bitfields or vector elements.
2183 LValue lv = emitLValue(e);
2184 Address addr = lv.getAddress();
2185
2186 // If the array type was an incomplete type, we need to make sure
2187 // the decay ends up being the right type.
2188 auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
2189
// VLAs already have the decayed (element-pointer) representation.
2190 if (e->getType()->isVariableArrayType())
2191 return addr;
2192
2193 [[maybe_unused]] auto pointeeTy =
2194 mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
2195
2196 [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
2197 assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
2198 assert(pointeeTy == arrayTy);
2199
2200 // The result of this decay conversion points to an array element within the
2201 // base lvalue. However, since TBAA currently does not support representing
2202 // accesses to elements of member arrays, we conservatively represent accesses
2203 // to the pointee object as if it had no base lvalue specified.
2204 // TODO: Support TBAA for member arrays.
2207
2208 mlir::Value ptr = builder.maybeBuildArrayDecay(
2209 cgm.getLoc(e->getSourceRange()), addr.getPointer(),
2210 convertTypeForMem(eltType));
2211 return Address(ptr, addr.getAlignment());
2212}
2213
2214/// Given the address of a temporary variable, produce an r-value of its type.
// (Signature and the makeAddrLValue line that defines `lvalue` are missing
// from this listing - confirm against the full source.)
2218 switch (getEvaluationKind(type)) {
2219 case cir::TEK_Complex:
2220 return RValue::getComplex(emitLoadOfComplex(lvalue, loc));
2221 case cir::TEK_Aggregate:
2222 return lvalue.asAggregateRValue();
2223 case cir::TEK_Scalar:
2224 return RValue::get(emitLoadOfScalar(lvalue, loc));
2225 }
2226 llvm_unreachable("bad evaluation kind");
2227}
2228
2229/// Emit an `if` on a boolean condition, filling `then` and `else` into
2230/// appropriate regions.
2231mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
2232 const Stmt *thenS,
2233 const Stmt *elseS) {
2234 mlir::Location thenLoc = getLoc(thenS->getSourceRange());
2235 std::optional<mlir::Location> elseLoc;
2236 if (elseS)
2237 elseLoc = getLoc(elseS->getSourceRange());
2238
// Delegate to the builder-callback overload; the callbacks record whether
// each branch emitted successfully. (The call line itself is missing from
// this listing.)
2239 mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
2241 cond, /*thenBuilder=*/
2242 [&](mlir::OpBuilder &, mlir::Location) {
2243 LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
2244 resThen = emitStmt(thenS, /*useCurrentScope=*/true);
2245 },
2246 thenLoc,
2247 /*elseBuilder=*/
2248 [&](mlir::OpBuilder &, mlir::Location) {
2249 assert(elseLoc && "Invalid location for elseS.");
2250 LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
2251 resElse = emitStmt(elseS, /*useCurrentScope=*/true);
2252 },
2253 elseLoc);
2254
2255 return mlir::LogicalResult::success(resThen.succeeded() &&
2256 resElse.succeeded());
2257}
2258
2259/// Emit an `if` on a boolean condition, filling `then` and `else` into
2260/// appropriate regions.
// (First signature line missing from this listing; returns the created
// cir.if operation.)
2262 const clang::Expr *cond, BuilderCallbackRef thenBuilder,
2263 mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
2264 std::optional<mlir::Location> elseLoc) {
2265 // Attempt to be as accurate as possible with IfOp location, generate
2266 // one fused location that has either 2 or 4 total locations, depending
2267 // on else's availability.
2268 SmallVector<mlir::Location, 2> ifLocs{thenLoc};
2269 if (elseLoc)
2270 ifLocs.push_back(*elseLoc);
2271 mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);
2272
2273 // Emit the code with the fully general case.
2274 mlir::Value condV = emitOpOnBoolExpr(loc, cond);
2275 return cir::IfOp::create(builder, loc, condV, elseLoc.has_value(),
2276 /*thenBuilder=*/thenBuilder,
2277 /*elseBuilder=*/elseBuilder);
2278}
2279
2280/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
// Evaluate `cond` as an i1/bool value, handling nested conditional operators
// by emitting a cir.ternary whose arms yield the scalar branch values.
2281mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
2282 const Expr *cond) {
2285 cond = cond->IgnoreParens();
2286
2287 // In LLVM the condition is reversed here for efficient codegen.
2288 // This should be done in CIR prior to LLVM lowering, if we do now
2289 // we can make CIR based diagnostics misleading.
2290 // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
2292
2293 if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
2294 Expr *trueExpr = condOp->getTrueExpr();
2295 Expr *falseExpr = condOp->getFalseExpr();
2296 mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());
2297
2298 mlir::Value ternaryOpRes =
2299 cir::TernaryOp::create(
2300 builder, loc, condV, /*thenBuilder=*/
2301 [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
2302 mlir::Value lhs = emitScalarExpr(trueExpr);
2303 cir::YieldOp::create(b, loc, lhs);
2304 },
2305 /*elseBuilder=*/
2306 [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
2307 mlir::Value rhs = emitScalarExpr(falseExpr);
2308 cir::YieldOp::create(b, loc, rhs);
2309 })
2310 .getResult();
2311
// The ternary result has the conditional's type; convert it to bool.
2312 return emitScalarConversion(ternaryOpRes, condOp->getType(),
2313 getContext().BoolTy, condOp->getExprLoc());
2314 }
2315
// A throw as a condition never produces a value; emit it (NYI) and return a
// dummy so callers have something to consume.
2316 if (isa<CXXThrowExpr>(cond)) {
2317 cgm.errorNYI("NYI");
2318 return createDummyValue(loc, cond->getType());
2319 }
2320
2321 // If the branch has a condition wrapped by __builtin_unpredictable,
2322 // create metadata that specifies that the branch is unpredictable.
2323 // Don't bother if not optimizing because that metadata would not be used.
2325
2326 // Emit the code with the fully general case.
2327 return evaluateExprAsBool(cond);
2328}
2329
// Emit an alloca named `name` of type `ty`, choosing the insertion block:
// the function entry block when requested (that line is missing from this
// listing), otherwise the current lexical scope's entry block.
2330mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2331 mlir::Location loc, CharUnits alignment,
2332 bool insertIntoFnEntryBlock,
2333 mlir::Value arraySize) {
2334 mlir::Block *entryBlock = insertIntoFnEntryBlock
2336 : curLexScope->getEntryBlock();
2337
2338 // If this is an alloca in the entry basic block of a cir.try and there's
2339 // a surrounding cir.scope, make sure the alloca ends up in the surrounding
2340 // scope instead. This is necessary in order to guarantee all SSA values are
2341 // reachable during cleanups.
2342 if (auto tryOp =
2343 llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
2344 if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
2345 entryBlock = &scopeOp.getScopeRegion().front();
2346 }
2347
2348 return emitAlloca(name, ty, loc, alignment,
2349 builder.getBestAllocaInsertPoint(entryBlock), arraySize);
2350}
2351
// Emit an alloca at an explicit insertion point. The insertion point is
// restored afterwards via the RAII guard so the caller's position is intact.
2352mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2353 mlir::Location loc, CharUnits alignment,
2354 mlir::OpBuilder::InsertPoint ip,
2355 mlir::Value arraySize) {
2356 // CIR uses its own alloca address space rather than follow the target data
2357 // layout like original CodeGen. The data layout awareness should be done in
2358 // the lowering pass instead.
// (The initializer line for localVarPtrTy is missing from this listing.)
2359 cir::PointerType localVarPtrTy =
2361 mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);
2362
2363 mlir::Value addr;
2364 {
2365 mlir::OpBuilder::InsertionGuard guard(builder);
2366 builder.restoreInsertionPoint(ip);
2367 addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
2368 /*var type*/ ty, name, alignIntAttr, arraySize);
2370 }
2371 return addr;
2372}
2373
2374// Note: this function also emit constructor calls to support a MSVC extensions
2375// allowing explicit constructor function call.
// (Signature and the member-pointer/member-or-operator call lines are missing
// from this listing.)
2378 const Expr *callee = ce->getCallee()->IgnoreParens();
2379
2380 if (isa<BinaryOperator>(callee))
2382
2383 const auto *me = cast<MemberExpr>(callee);
2384 const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());
2385
2386 if (md->isStatic()) {
2387 cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
2388 return RValue::get(nullptr);
2389 }
2390
// Capture everything needed to emit the member call: qualifier (if any),
// arrow-vs-dot access, and the object expression.
2391 bool hasQualifier = me->hasQualifier();
2392 NestedNameSpecifier qualifier = me->getQualifier();
2393 bool isArrow = me->isArrow();
2394 const Expr *base = me->getBase();
2396
2397 ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
2398}
2399
// Emit `e` as an lvalue and return its address as an RValue. (Signature line
// missing from this listing - presumably the reference-binding emitter taking
// the expression; confirm against the full source.)
2401 // Emit the expression as an lvalue.
2402 LValue lv = emitLValue(e);
2403 assert(lv.isSimple());
2404 mlir::Value value = lv.getPointer();
2405
2407
2408 return RValue::get(value);
2409}
2410
// Load the pointer stored in a reference lvalue and wrap it as an Address of
// the referenced (pointee) type, with its natural alignment. (First signature
// line missing from this listing.)
2412 LValueBaseInfo *pointeeBaseInfo) {
2413 if (refLVal.isVolatile())
2414 cgm.errorNYI(loc, "load of volatile reference");
2415
2416 cir::LoadOp load =
2417 cir::LoadOp::create(builder, loc, refLVal.getAddress().getElementType(),
2418 refLVal.getAddress().getPointer());
2419
2421
2422 QualType pointeeType = refLVal.getType()->getPointeeType();
2423 CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
2424 return Address(load, convertTypeForMem(pointeeType), align);
2425}
2426
// Build an lvalue for the object a reference refers to: wrap the reference
// address as an lvalue, load through it, and re-wrap the pointee address.
// (First signature line missing from this listing.)
2428 mlir::Location loc,
2429 QualType refTy,
2430 AlignmentSource source) {
2431 LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
2432 LValueBaseInfo pointeeBaseInfo;
2434 Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
2435 return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
2436 pointeeBaseInfo);
2437}
2438
2439void CIRGenFunction::emitTrap(mlir::Location loc, bool createNewBlock) {
2440 cir::TrapOp::create(builder, loc);
2441 if (createNewBlock)
2442 builder.createBlock(builder.getBlock()->getParent());
2443}
2444
// Emit a cir.unreachable; optionally start a new block afterwards so
// subsequent emission has a valid insertion point. (First signature line
// missing from this listing.)
2446 bool createNewBlock) {
2448 cir::UnreachableOp::create(builder, getLoc(loc));
2449 if (createNewBlock)
2450 builder.createBlock(builder.getBlock()->getParent());
2451}
2452
2453mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
2454 clang::QualType qt) {
2455 mlir::Type t = convertType(qt);
2456 CharUnits alignment = getContext().getTypeAlignInChars(qt);
2457 return builder.createDummyValue(loc, t, alignment);
2458}
2459
2460//===----------------------------------------------------------------------===//
2461// CIR builder helpers
2462//===----------------------------------------------------------------------===//
2463
// Create a temporary in memory for type `ty`, defaulting the alignment to the
// type's ABI alignment. (First signature line missing from this listing.)
2465 const Twine &name, Address *alloca,
2466 mlir::OpBuilder::InsertPoint ip) {
2467 // FIXME: Should we prefer the preferred type alignment here?
2468 return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
2469 alloca, ip);
2470}
2471
// Create a temporary in memory for type `ty` at the given alignment; matrix
// temporaries are not yet supported. (First signature line missing from this
// listing.)
2473 mlir::Location loc, const Twine &name,
2474 Address *alloca,
2475 mlir::OpBuilder::InsertPoint ip) {
2476 Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
2477 /*ArraySize=*/nullptr, alloca, ip);
2478 if (ty->isConstantMatrixType()) {
2480 cgm.errorNYI(loc, "temporary matrix value");
2481 }
2482 return result;
2483}
2484
2485/// This creates an alloca and inserts it into the entry block of the
2486/// current region.
// (First signature line missing from this listing.)
2488 mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
2489 mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
// An explicit insertion point wins over the default entry-block placement.
2490 cir::AllocaOp alloca = ip.isSet()
2491 ? createTempAlloca(ty, loc, name, ip, arraySize)
2492 : createTempAlloca(ty, loc, name, arraySize);
2493 alloca.setAlignmentAttr(cgm.getSize(align));
2494 return Address(alloca, ty, align);
2495}
2496
2497/// This creates an alloca and inserts it into the entry block. The alloca is
2498/// casted to default address space if necessary.
2499// TODO(cir): Implement address space casting to match classic codegen's
2500// CreateTempAlloca behavior with DestLangAS parameter
// (The first signature line and the condition guarding the address-space
// choice below are missing from this listing - confirm against the full
// source.)
2502 mlir::Location loc, const Twine &name,
2503 mlir::Value arraySize,
2504 Address *allocaAddr,
2505 mlir::OpBuilder::InsertPoint ip) {
2506 Address alloca =
2507 createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
2508 if (allocaAddr)
2509 *allocaAddr = alloca;
2510 mlir::Value v = alloca.getPointer();
2511 // Alloca always returns a pointer in alloca address space, which may
2512 // be different from the type defined by the language. For example,
2513 // in C++ the auto variables are in the default address space. Therefore
2514 // cast alloca to the default address space when necessary.
2515
2516 cir::PointerType dstTy;
2518 dstTy = builder.getPointerTo(ty, getCIRAllocaAddressSpace());
2519 else
2520 dstTy = builder.getPointerTo(ty, clang::LangAS::Default);
2521 v = performAddrSpaceCast(v, dstTy);
2522
2523 return Address(v, ty, align);
2524}
2525
2526/// This creates an alloca and inserts it into the entry block if \p ArraySize
2527/// is nullptr, otherwise inserts it at the current insertion point of the
2528/// builder.
2529cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2530 mlir::Location loc,
2531 const Twine &name,
2532 mlir::Value arraySize,
2533 bool insertIntoFnEntryBlock) {
2534 return mlir::cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
2535 insertIntoFnEntryBlock, arraySize)
2536 .getDefiningOp());
2537}
2538
2539/// This creates an alloca and inserts it into the provided insertion point
2540cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2541 mlir::Location loc,
2542 const Twine &name,
2543 mlir::OpBuilder::InsertPoint ip,
2544 mlir::Value arraySize) {
2545 assert(ip.isSet() && "Insertion point is not set");
2546 return mlir::cast<cir::AllocaOp>(
2547 emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
2548 .getDefiningOp());
2549}
2550
2551/// CreateDefaultAlignTempAlloca - This creates an alloca with the
2552/// default alignment of the corresponding LLVM type, which is *not*
2553/// guaranteed to be related in any way to the expected alignment of
2554/// an AST type that might have been lowered to Ty.
// (First signature line missing from this listing.)
2556 mlir::Location loc,
2557 const Twine &name) {
2558 CharUnits align =
2559 CharUnits::fromQuantity(cgm.getDataLayout().getABITypeAlign(ty));
2560 return createTempAlloca(ty, align, loc, name);
2561}
2562
2563/// Try to emit a reference to the given value without producing it as
2564/// an l-value. For many cases, this is just an optimization, but it avoids
2565/// us needing to emit global copies of variables if they're named without
2566/// triggering a formal use in a context where we can't emit a direct
2567/// reference to them, for instance if a block or lambda or a member of a
2568/// local class uses a const int variable or constexpr variable from an
2569/// enclosing function.
2570///
2571/// For named members of enums, this is the only way they are emitted.
// Try to emit the referenced declaration as a constant without producing an
// lvalue; currently only enum constants are handled. (Signature lines missing
// from this listing.)
2574 const ValueDecl *value = refExpr->getDecl();
2575
2576 // There is a lot more to do here, but for now only EnumConstantDecl is
2577 // supported.
2579
2580 // The value needs to be an enum constant or a constant variable.
2581 if (!isa<EnumConstantDecl>(value))
2582 return ConstantEmission();
2583
2584 Expr::EvalResult result;
2585 if (!refExpr->EvaluateAsRValue(result, getContext()))
2586 return ConstantEmission();
2587
2588 QualType resultType = refExpr->getType();
2589
2590 // As long as we're only handling EnumConstantDecl, there should be no
2591 // side-effects.
2592 assert(!result.HasSideEffects);
2593
2594 // Emit as a constant.
2595 // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
2596 // somewhat heavy refactoring...)
2597 mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
2598 refExpr->getLocation(), result.Val, resultType);
2599 mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
2600 assert(cstToEmit && "expected a typed attribute");
2601
2603
2604 return ConstantEmission::forValue(cstToEmit);
2605}
2606
// NOTE(review): the enclosing signature and condition lines are missing from
// this listing - presumably the MemberExpr overload of tryEmitAsConstant,
// which forwards to the DeclRefExpr overload when the member expression can
// be converted to a DeclRefExpr; confirm against the full source.
2610 return tryEmitAsConstant(dre);
2611 return ConstantEmission();
2612}
2613
// Materialize a previously computed ConstantEmission as a scalar constant op;
// reference-style constants are not yet supported. (First signature line
// missing from this listing.)
2615 const CIRGenFunction::ConstantEmission &constant, Expr *e) {
2616 assert(constant && "not a constant");
2617 if (constant.isReference()) {
2618 cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
2619 return {};
2620 }
2621 return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
2622}
2623
// Emit the lvalue for a PredefinedExpr (__func__ and friends) as a global
// string named after the predefined kind and the enclosing function.
// (Signature and the nameItems initializer continuation are missing from this
// listing.)
2625 const StringLiteral *sl = e->getFunctionName();
2626 assert(sl != nullptr && "No StringLiteral name in PredefinedExpr");
2627 auto fn = cast<cir::FuncOp>(curFn);
2628 StringRef fnName = fn.getName();
// Strip the "\01" mangling-escape prefix if present.
2629 fnName.consume_front("\01");
2630 std::array<StringRef, 2> nameItems = {
2632 std::string gvName = llvm::join(nameItems, ".");
2633 if (isa_and_nonnull<BlockDecl>(curCodeDecl))
2634 cgm.errorNYI(e->getSourceRange(), "predefined lvalue in block");
2635
2636 return emitStringLiteralLValue(sl, gvName);
2637}
2638
2643
2644namespace {
2645// Handle the case where the condition is a constant evaluatable simple integer,
2646// which means we don't have to separately handle the true/false blocks.
// Returns std::nullopt when folding is not possible (non-constant condition,
// or the dead branch contains a label and must still be emitted).
2647std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
2649 const Expr *condExpr = e->getCond();
2650 llvm::APSInt condExprVal;
2651 if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
2652 return std::nullopt;
2653
2654 const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
2655 if (!condExprVal.getBoolValue())
2656 std::swap(live, dead);
2657
// A label in the dead branch may be jumped to, so both branches must exist.
2658 if (cgf.containsLabel(dead))
2659 return std::nullopt;
2660
2661 // If the true case is live, we need to track its region.
2664 // If a throw expression we emit it and return an undefined lvalue
2665 // because it can't be used.
2666 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
2667 cgf.emitCXXThrowExpr(throwExpr);
2668 // Return an undefined lvalue - the throw terminates execution
2669 // so this value will never actually be used
2670 mlir::Type elemTy = cgf.convertType(dead->getType());
2671 mlir::Value undefPtr =
2672 cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
2673 cgf.getLoc(throwExpr->getSourceRange()));
2674 return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
2675 dead->getType());
2676 }
2677 return cgf.emitLValue(live);
2678}
2679
2680/// Emit the operand of a glvalue conditional operator. This is either a glvalue
2681/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
2682/// LValue is returned and the current block has been terminated.
2683static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
2684 const Expr *operand) {
2685 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
2686 cgf.emitCXXThrowExpr(throwExpr);
2687 return std::nullopt;
2688 }
2689
2690 return cgf.emitLValue(operand);
2691}
2692} // namespace
2693
2694// Create and generate the 3 blocks for a conditional operator.
2695// Leaves the 'current block' in the continuation basic block.
// Emits both arms of the conditional with `branchGenFunc` inside a
// cir.ternary, recording each arm's lvalue and patching void/unreachable arms
// with yields so the ternary's regions type-check. (Signature lines and the
// `insertPoints` vector declaration are missing from this listing.)
2696template <typename FuncTy>
2699 const FuncTy &branchGenFunc) {
2700 ConditionalInfo info;
2701 ConditionalEvaluation eval(*this);
2702 mlir::Location loc = getLoc(e->getSourceRange());
2703 CIRGenBuilderTy &builder = getBuilder();
2704
2705 mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
2707 mlir::Type yieldTy{};
2708
2709 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
2710 const Expr *expr, std::optional<LValue> &resultLV) {
2711 CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
2712 curLexScope->setAsTernary();
2714
2715 eval.beginEvaluation();
2716 resultLV = branchGenFunc(*this, expr);
2717 mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
2718 eval.endEvaluation();
2719
2720 if (resultPtr) {
2721 yieldTy = resultPtr.getType();
2722 cir::YieldOp::create(b, loc, resultPtr);
2723 } else {
2724 // If LHS or RHS is a void expression we need
2725 // to patch arms as to properly match yield types.
2726 // If the current block's terminator is an UnreachableOp (from a throw),
2727 // we don't need a yield
2728 if (builder.getInsertionBlock()->mightHaveTerminator()) {
2729 mlir::Operation *terminator =
2730 builder.getInsertionBlock()->getTerminator();
2731 if (isa_and_nonnull<cir::UnreachableOp>(terminator))
2732 insertPoints.push_back(b.saveInsertionPoint());
2733 }
2734 }
2735 };
2736
2737 info.result = cir::TernaryOp::create(
2738 builder, loc, condV,
2739 /*trueBuilder=*/
2740 [&](mlir::OpBuilder &b, mlir::Location loc) {
2741 emitBranch(b, loc, e->getTrueExpr(), info.lhs);
2742 },
2743 /*falseBuilder=*/
2744 [&](mlir::OpBuilder &b, mlir::Location loc) {
2745 emitBranch(b, loc, e->getFalseExpr(), info.rhs);
2746 })
2747 .getResult();
2748
2749 // If both arms are void, so be it.
2750 if (!yieldTy)
2751 yieldTy = voidTy;
2752
2753 // Insert required yields.
2754 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2755 mlir::OpBuilder::InsertionGuard guard(builder);
2756 builder.restoreInsertionPoint(toInsert);
2757
// NOTE(review): after the fallback above, yieldTy is always non-null here,
// so this empty-yield branch looks dead - confirm whether voidTy can be null.
2758 // Block does not return: build empty yield.
2759 if (!yieldTy) {
2760 cir::YieldOp::create(builder, loc);
2761 } else { // Block returns: set null yield value.
2762 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2763 cir::YieldOp::create(builder, loc, op0);
2764 }
2765 }
2766
2767 return info;
2768}
2769
// Emit an lvalue for a conditional operator: aggregates go through the
// rvalue path, constant-foldable conditions take the simple-case fast path,
// and otherwise both arms are emitted and merged at their common address.
// (Signature lines missing from this listing.)
2772 if (!expr->isGLValue()) {
2773 // ?: here should be an aggregate.
2774 assert(hasAggregateEvaluationKind(expr->getType()) &&
2775 "Unexpected conditional operator!");
2776 return emitAggExprToLValue(expr);
2777 }
2778
2779 OpaqueValueMapping binding(*this, expr);
2780 if (std::optional<LValue> res =
2781 handleConditionalOperatorLValueSimpleCase(*this, expr))
2782 return *res;
2783
2784 ConditionalInfo info =
2785 emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
2786 return emitLValueOrThrowExpression(cgf, e);
2787 });
2788
2789 if ((info.lhs && !info.lhs->isSimple()) ||
2790 (info.rhs && !info.rhs->isSimple())) {
2791 cgm.errorNYI(expr->getSourceRange(),
2792 "unsupported conditional operator with non-simple lvalue");
2793 return LValue();
2794 }
2795
// Both arms produced an address: the merged lvalue uses the ternary result
// with the weaker (minimum) alignment and stronger (maximum) alignment
// source of the two arms.
2796 if (info.lhs && info.rhs) {
2797 Address lhsAddr = info.lhs->getAddress();
2798 Address rhsAddr = info.rhs->getAddress();
2799 Address result(info.result, lhsAddr.getElementType(),
2800 std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
2801 AlignmentSource alignSource =
2802 std::max(info.lhs->getBaseInfo().getAlignmentSource(),
2803 info.rhs->getBaseInfo().getAlignmentSource());
2805 return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
2806 }
2807
// One arm threw: use whichever arm produced a value.
2808 assert((info.lhs || info.rhs) &&
2809 "both operands of glvalue conditional are throw-expressions?");
2810 return info.lhs ? *info.lhs : *info.rhs;
2811}
2812
2813/// An LValue is a candidate for having its loads and stores be made atomic if
2814/// we are operating under /volatile:ms *and* the LValue itself is volatile and
2815/// performing such an operation can be performed without a libcall.
// (Signature line missing from this listing.) Always false until MSVolatile
// support is implemented; the NYI below fires only when MSVolatile is on.
2817 if (!cgm.getLangOpts().MSVolatile)
2818 return false;
2819
2820 cgm.errorNYI("LValueSuitableForInlineAtomic LangOpts MSVolatile");
2821 return false;
2822}
2823
#define V(N, I)
Provides definitions for the various language-specific address spaces.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e, GlobalDecl gd)
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static cir::IntAttr getConstantIndexOrNull(mlir::Value idx)
static const Expr * getSimpleArrayDecayOperand(const Expr *e)
If the specified expr is a simple decay from an array to pointer, return the array subexpression.
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd)
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e, const VarDecl *vd)
static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf, mlir::Location beginLoc, mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd, mlir::Value thisValue)
static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd)
Defines the clang::Expr interface and subclasses for C++ expressions.
An ordered collection of selectors, each with an associated kind. A selector has a kind
__device__ __2f16 b
__device__ __2f16 float c
static OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block)
cir::GetMemberOp createGetMember(mlir::Location loc, mlir::Type resultTy, mlir::Value base, llvm::StringRef name, unsigned index)
cir::PointerType getPointerTo(mlir::Type ty)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
mlir::Value createGetGlobal(mlir::Location loc, cir::GlobalOp global, bool threadLocal=false)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType BoolTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
CanQualType getCanonicalTagType(const TagDecl *TD) const
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2779
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2753
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:2767
SourceLocation getEndLoc() const
Definition Expr.h:2770
QualType getElementType() const
Definition TypeBase.h:3742
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
Expr * getRHS() const
Definition Expr.h:4093
Opcode getOpcode() const
Definition Expr.h:4086
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
static Address invalid()
Definition Address.h:74
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:136
mlir::Type getType() const
Definition Address.h:115
bool isValid() const
Definition Address.h:75
mlir::Operation * getDefiningOp() const
Get the operation which defines this address.
Definition Address.h:139
An aggregate value slot.
IsDestructed_t
This is set to true if the slot might be aliased and it's not undefined behavior to access it through...
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
Address createElementBitCast(mlir::Location loc, Address addr, mlir::Type destType)
Cast the element type of the given address to a different type, preserving information like the align...
mlir::Value getArrayElement(mlir::Location arrayLocBegin, mlir::Location arrayLocEnd, mlir::Value arrayPtr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
Create a cir.ptr_stride operation to get access to an array element.
Abstract information about a function or function prototype.
Definition CIRGenCall.h:27
bool isPseudoDestructor() const
Definition CIRGenCall.h:123
void setFunctionPointer(mlir::Operation *functionPtr)
Definition CIRGenCall.h:185
const clang::FunctionDecl * getBuiltinDecl() const
Definition CIRGenCall.h:99
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CIRGenCall.h:127
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
unsigned getBuiltinID() const
Definition CIRGenCall.h:103
static CIRGenCallee forBuiltin(unsigned builtinID, const clang::FunctionDecl *builtinDecl)
Definition CIRGenCall.h:108
mlir::Operation * getFunctionPointer() const
Definition CIRGenCall.h:147
static CIRGenCallee forPseudoDestructor(const clang::CXXPseudoDestructorExpr *expr)
Definition CIRGenCall.h:117
An object to manage conditionally-evaluated expressions.
static ConstantEmission forValue(mlir::TypedAttr c)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not point to a complete object, construct an l-value with the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitMemberExpr(const MemberExpr *e)
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
LValue emitLValueForLambdaField(const FieldDecl *field)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
mlir::Operation * curFn
The current function or global initializer that is generated code for.
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::Type convertTypeForMem(QualType t)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
static bool hasAggregateEvaluationKind(clang::QualType type)
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind op, bool isPre)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
LValue emitCallExprLValue(const clang::CallExpr *e)
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
mlir::MLIRContext & getMLIRContext()
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
void emitCXXThrowExpr(const CXXThrowExpr *e)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates an alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
CIRGenBuilderTy & getBuilder()
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
mlir::Operation * getAddrOfGlobalTemporary(const MaterializeTemporaryExpr *mte, const Expr *init)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
mlir::Value getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty={}, ForDefinition_t isForDefinition=NotForDefinition)
Return the mlir::Value for the address of the given global variable.
cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant)
This class handles record and union layout info while lowering AST types to CIR types.
cir::RecordType getCIRType() const
Return the "complete object" LLVM type associated with this record.
const CIRGenBitFieldInfo & getBitFieldInfo(const clang::FieldDecl *fd) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getCIRFieldNo(const clang::FieldDecl *fd) const
Return cir::RecordType element number that corresponds to the field FD.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
mlir::Type convertTypeForMem(clang::QualType, bool forBitField=false)
Convert type T into an mlir::Type.
mlir::Attribute emitAbstract(const Expr *e, QualType destType)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
AlignmentSource getAlignmentSource() const
void mergeForCast(const LValueBaseInfo &info)
bool isExtVectorElt() const
const clang::Qualifiers & getQuals() const
mlir::Value getExtVectorPointer() const
static LValue makeExtVectorElt(Address vecAddress, mlir::ArrayAttr elts, clang::QualType type, LValueBaseInfo baseInfo)
mlir::Value getVectorIdx() const
bool isVectorElt() const
Address getAddress() const
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
mlir::ArrayAttr getExtVectorElts() const
static LValue makeVectorElt(Address vecAddress, mlir::Value index, clang::QualType t, LValueBaseInfo baseInfo)
RValue asAggregateRValue() const
unsigned getVRQualifiers() const
clang::QualType getType() const
static LValue makeBitfield(Address addr, const CIRGenBitFieldInfo &info, clang::QualType type, LValueBaseInfo baseInfo)
Create a new object to represent a bit-field access.
mlir::Value getPointer() const
bool isVolatileQualified() const
bool isBitField() const
Address getVectorAddress() const
clang::CharUnits getAlignment() const
LValueBaseInfo getBaseInfo() const
bool isVolatile() const
const CIRGenBitFieldInfo & getBitFieldInfo() const
Address getBitFieldAddress() const
Address getExtVectorAddress() const
bool isSimple() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a C++ destructor within a class.
Definition DeclCXX.h:2876
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:180
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2136
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1372
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:849
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
arg_range arguments()
Definition Expr.h:3198
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3766
bool changesVolatileQualification() const
Return.
Definition Expr.h:3813
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1951
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3283
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
bool isFileScope() const
Definition Expr.h:3640
const Expr * getInitializer() const
Definition Expr.h:3636
ConditionalOperator - The ?
Definition Expr.h:4394
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1477
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:488
ValueDecl * getDecl()
Definition Expr.h:1341
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1471
SourceLocation getLocation() const
Definition Expr.h:1349
SourceLocation getLocation() const
Definition DeclBase.h:439
DeclContext * getDeclContext()
Definition DeclBase.h:448
bool hasAttr() const
Definition DeclBase.h:577
const Expr * getBase() const
Definition Expr.h:6580
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:84
bool isGLValue() const
Definition Expr.h:287
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:447
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1546
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6609
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4418
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4529
Represents a member of a struct/union/class.
Definition Decl.h:3160
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3263
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4827
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
bool isZeroSize(const ASTContext &Ctx) const
Determine if this field is a subobject of zero size, that is, either a zero-length bit-field or a fie...
Definition Decl.cpp:4767
Represents a function declaration or definition.
Definition Decl.h:2000
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5315
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4921
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4946
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4938
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4971
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3450
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3591
Expr * getBase() const
Definition Expr.h:3444
bool isArrow() const
Definition Expr.h:3551
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3562
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3661
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
bool isUnique() const
Definition Expr.h:1239
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3336
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2008
StringRef getIdentKindName() const
Definition Expr.h:2065
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2043
StringLiteral * getFunctionName()
Definition Expr.h:2052
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8514
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8428
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1444
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1185
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1551
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
Represents a struct/union/class.
Definition Decl.h:4327
Encodes a location in the source.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
bool isUnion() const
Definition Decl.h:3928
Exposes information about the current target.
Definition TargetInfo.h:226
virtual StringRef getABI() const
Get the ABI currently in use.
bool isVoidType() const
Definition TypeBase.h:8991
bool isBooleanType() const
Definition TypeBase.h:9128
bool isPackedVectorBoolType(const ASTContext &ctx) const
Definition Type.cpp:419
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9294
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8724
bool isFunctionPointerType() const
Definition TypeBase.h:8692
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2374
bool isConstantMatrixType() const
Definition TypeBase.h:8792
bool isPointerType() const
Definition TypeBase.h:8625
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9285
bool isReferenceType() const
Definition TypeBase.h:8649
bool isVariableArrayType() const
Definition TypeBase.h:8736
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isExtVectorBoolType() const
Definition TypeBase.h:8772
bool isAnyComplexType() const
Definition TypeBase.h:8760
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9171
bool isAtomicType() const
Definition TypeBase.h:8817
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2808
bool isFunctionType() const
Definition TypeBase.h:8621
bool isVectorType() const
Definition TypeBase.h:8764
bool isSubscriptableVectorType() const
Definition TypeBase.h:8784
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9218
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2396
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
SourceLocation getExprLoc() const
Definition Expr.h:2371
Expr * getSubExpr() const
Definition Expr.h:2288
Opcode getOpcode() const
Definition Expr.h:2283
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2329
static bool isPrefix(Opcode Op)
isPrefix - Return true if this is a prefix operation, like –x.
Definition Expr.h:2322
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
TLSKind getTLSKind() const
Definition Decl.cpp:2180
bool hasInit() const
Definition Decl.cpp:2410
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2378
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1184
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:952
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3974
Represents a GCC generic vector type.
Definition TypeBase.h:4183
Defines the clang::TargetInfo interface.
mlir::ptr::MemorySpaceAttrInterface toCIRAddressSpaceAttr(mlir::MLIRContext &ctx, clang::LangAS langAS)
Convert an AST LangAS to the appropriate CIR address space attribute interface.
OverflowBehavior
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static AlignmentSource getFieldAlignmentSource(AlignmentSource source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, CUDAKernelCallExpr > cudaKernelCallExpr
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
bool isTargetAddressSpace(LangAS AS)
@ SC_Register
Definition Specifiers.h:257
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:342
@ SD_Static
Static storage duration.
Definition Specifiers.h:343
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:340
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:341
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:344
LangAS
Defines the address space values used by the address space qualifier of QualType.
U cast(CodeGen::Address addr)
Definition Address.h:327
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:180
static bool weakRefReference()
static bool objCLifetime()
static bool emitLifetimeMarkers()
static bool opLoadEmitScalarRangeCheck()
static bool addressSpace()
static bool opAllocaNonGC()
static bool opAllocaOpenMPThreadPrivate()
static bool preservedAccessIndexRegion()
static bool mergeAllConstants()
static bool opLoadStoreTbaa()
static bool opCallChain()
static bool opAllocaImpreciseLifetime()
static bool opAllocaStaticLocal()
static bool opAllocaTLS()
static bool emitCheckedInBoundsGEP()
static bool attributeNoBuiltin()
static bool setObjCGCLValueClass()
static bool cirgenABIInfo()
static bool opLoadStoreObjC()
static bool opCallArgEvaluationOrder()
static bool lambdaCaptures()
static bool insertBuiltinUnpredictable()
static bool opCallMustTail()
static bool shouldReverseUnaryCondOnBoolExpr()
static bool tryEmitAsConstant()
static bool addressIsKnownNonNull()
static bool astVarDeclInterface()
static bool cgCapturedStmtInfo()
static bool opAllocaEscapeByReference()
static bool opLoadStoreNontemporal()
static bool opCallFnInfoOpts()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Record with information about how a bitfield should be accessed.
unsigned volatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned volatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
mlir::ptr::MemorySpaceAttrInterface getCIRAllocaAddressSpace() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:615