clang 23.0.0git
CIRGenExpr.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "Address.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/CharUnits.h"
22#include "clang/AST/Decl.h"
23#include "clang/AST/Expr.h"
24#include "clang/AST/ExprCXX.h"
31#include <optional>
32
33using namespace clang;
34using namespace clang::CIRGen;
35using namespace cir;
36
37/// Get the address of a zero-sized field within a record. The resulting address
38/// doesn't necessarily have the right type.
// NOTE(review): this capture's embedded numbering jumps 38->40 and 63->65;
// the function signature line and the start of the offset computation are
// missing from this snippet — TODO confirm against the upstream file.
40 const FieldDecl *field,
41 llvm::StringRef fieldName,
42 unsigned fieldIndex) {
// Zero-sized fields carry no storage; emitting their address is still NYI,
// so report and bail out with an invalid address.
43 if (field->isZeroSize(getContext())) {
44 cgm.errorNYI(field->getSourceRange(),
45 "emitAddrOfFieldStorage: zero-sized field");
46 return Address::invalid();
47 }
48
49 mlir::Location loc = getLoc(field->getLocation());
50
51 mlir::Type fieldType = convertType(field->getType());
52 auto fieldPtr = cir::PointerType::get(fieldType);
53 // For most cases fieldName is the same as field->getName() but for lambdas,
54 // which do not currently carry the name, so it can be passed down from the
55 // CaptureStmt.
56 cir::GetMemberOp memberAddr = builder.createGetMember(
57 loc, fieldPtr, base.getPointer(), fieldName, fieldIndex);
58
59 // Retrieve layout information, compute alignment and return the final
60 // address.
61 const RecordDecl *rec = field->getParent();
62 const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
63 unsigned idx = layout.getCIRFieldNo(field);
// The alignment of the member is the base alignment adjusted by the member's
// byte offset within the CIR record layout.
65 layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
66 return Address(memberAddr, base.getAlignment().alignmentAtOffset(offset));
67}
68
69/// Given an expression of pointer type, try to
70/// derive a more accurate bound on the alignment of the pointer.
// NOTE(review): embedded numbering jumps (70->72, 93->95, 115->117, 121->123,
// 134->137, 140->142, 245->247) show that the signature line and several
// statements were dropped from this capture — confirm against upstream.
72 LValueBaseInfo *baseInfo) {
73 // We allow this with ObjC object pointers because of fragile ABIs.
74 assert(expr->getType()->isPointerType() ||
75 expr->getType()->isObjCObjectPointerType());
76 expr = expr->IgnoreParens();
77
78 // Casts:
79 if (auto const *ce = dyn_cast<CastExpr>(expr)) {
80 if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
81 cgm.emitExplicitCastExprType(ece);
82
83 switch (ce->getCastKind()) {
84 // Non-converting casts (but not C's implicit conversion from void*).
85 case CK_BitCast:
86 case CK_NoOp:
87 case CK_AddressSpaceConversion: {
88 if (const auto *ptrTy =
89 ce->getSubExpr()->getType()->getAs<PointerType>()) {
// void* carries no useful pointee alignment; fall through to the generic
// natural-alignment path at the bottom of the function.
90 if (ptrTy->getPointeeType()->isVoidType())
91 break;
92
93 LValueBaseInfo innerBaseInfo;
95 Address addr =
96 emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
97 if (baseInfo)
98 *baseInfo = innerBaseInfo;
99
100 if (isa<ExplicitCastExpr>(ce)) {
101 LValueBaseInfo targetTypeBaseInfo;
102
103 const QualType pointeeType = expr->getType()->getPointeeType();
104 const CharUnits align =
105 cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);
106
107 // If the source l-value is opaque, honor the alignment of the
108 // casted-to type.
109 if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
110 if (baseInfo)
111 baseInfo->mergeForCast(targetTypeBaseInfo);
112 addr = Address(addr.getPointer(), addr.getElementType(), align);
113 }
114 }
115
117
// Rewrite the element type of the address to match the cast's result
// pointee type before handing it back to the caller.
118 const mlir::Type eltTy =
119 convertTypeForMem(expr->getType()->getPointeeType());
120 addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
121 addr, eltTy);
123
124 return addr;
125 }
126 break;
127 }
128
129 // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
130 case CK_ArrayToPointerDecay:
131 return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);
132
133 case CK_UncheckedDerivedToBase:
134 case CK_DerivedToBase: {
// Derived-to-base: compute the sub-expression's address, then adjust it
// along the inheritance path to the base subobject.
137 Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
138 const CXXRecordDecl *derived =
139 ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
140 return getAddressOfBaseClass(addr, derived, ce->path(),
142 ce->getExprLoc());
143 }
144
145 case CK_AnyPointerToBlockPointerCast:
146 case CK_BaseToDerived:
147 case CK_BaseToDerivedMemberPointer:
148 case CK_BlockPointerToObjCPointerCast:
149 case CK_BuiltinFnToFnPtr:
150 case CK_CPointerToObjCPointerCast:
151 case CK_DerivedToBaseMemberPointer:
152 case CK_Dynamic:
153 case CK_FunctionToPointerDecay:
154 case CK_IntegralToPointer:
155 case CK_LValueToRValue:
156 case CK_LValueToRValueBitCast:
157 case CK_NullToMemberPointer:
158 case CK_NullToPointer:
159 case CK_ReinterpretMemberPointer:
160 // Common pointer conversions, nothing to do here.
161 // TODO: Is there any reason to treat base-to-derived conversions
162 // specially?
163 break;
164
// The remaining cast kinds never produce a pointer value, so reaching one
// of them here indicates a caller bug (see the assert at function entry).
165 case CK_ARCConsumeObject:
166 case CK_ARCExtendBlockObject:
167 case CK_ARCProduceObject:
168 case CK_ARCReclaimReturnedObject:
169 case CK_AtomicToNonAtomic:
170 case CK_BooleanToSignedIntegral:
171 case CK_ConstructorConversion:
172 case CK_CopyAndAutoreleaseBlockObject:
173 case CK_Dependent:
174 case CK_FixedPointCast:
175 case CK_FixedPointToBoolean:
176 case CK_FixedPointToFloating:
177 case CK_FixedPointToIntegral:
178 case CK_FloatingCast:
179 case CK_FloatingComplexCast:
180 case CK_FloatingComplexToBoolean:
181 case CK_FloatingComplexToIntegralComplex:
182 case CK_FloatingComplexToReal:
183 case CK_FloatingRealToComplex:
184 case CK_FloatingToBoolean:
185 case CK_FloatingToFixedPoint:
186 case CK_FloatingToIntegral:
187 case CK_HLSLAggregateSplatCast:
188 case CK_HLSLArrayRValue:
189 case CK_HLSLElementwiseCast:
190 case CK_HLSLVectorTruncation:
191 case CK_HLSLMatrixTruncation:
192 case CK_IntToOCLSampler:
193 case CK_IntegralCast:
194 case CK_IntegralComplexCast:
195 case CK_IntegralComplexToBoolean:
196 case CK_IntegralComplexToFloatingComplex:
197 case CK_IntegralComplexToReal:
198 case CK_IntegralRealToComplex:
199 case CK_IntegralToBoolean:
200 case CK_IntegralToFixedPoint:
201 case CK_IntegralToFloating:
202 case CK_LValueBitCast:
203 case CK_MatrixCast:
204 case CK_MemberPointerToBoolean:
205 case CK_NonAtomicToAtomic:
206 case CK_ObjCObjectLValueCast:
207 case CK_PointerToBoolean:
208 case CK_PointerToIntegral:
209 case CK_ToUnion:
210 case CK_ToVoid:
211 case CK_UserDefinedConversion:
212 case CK_VectorSplat:
213 case CK_ZeroToOCLOpaqueType:
214 llvm_unreachable("unexpected cast for emitPointerWithAlignment");
215 }
216 }
217
218 // Unary &
219 if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
220 // TODO(cir): maybe we should use cir.unary for pointers here instead.
221 if (uo->getOpcode() == UO_AddrOf) {
222 LValue lv = emitLValue(uo->getSubExpr());
223 if (baseInfo)
224 *baseInfo = lv.getBaseInfo();
226 return lv.getAddress();
227 }
228 }
229
230 // std::addressof and variants.
231 if (auto const *call = dyn_cast<CallExpr>(expr)) {
232 switch (call->getBuiltinCallee()) {
233 default:
234 break;
235 case Builtin::BIaddressof:
236 case Builtin::BI__addressof:
237 case Builtin::BI__builtin_addressof: {
238 cgm.errorNYI(expr->getSourceRange(),
239 "emitPointerWithAlignment: builtin addressof");
240 return Address::invalid();
241 }
242 }
243 }
244
245 // Otherwise, use the alignment of the type.
// NOTE(review): line 246 (the start of this return statement) is missing
// from the capture; presumably it builds an Address from the natural type
// alignment of the pointee — TODO confirm.
247 emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
248 /*forPointeeType=*/true, baseInfo);
249}
250
// Store the rvalue `src` through the destination l-value `dst`, handling
// non-simple l-values (vector element, bitfield) specially.
// NOTE(review): the signature line (251) and line 266 are missing from this
// capture; line 266 presumably performed the bitfield store, which would
// explain the otherwise-unreachable errorNYI below — TODO confirm upstream.
252 bool isInit) {
253 if (!dst.isSimple()) {
254 if (dst.isVectorElt()) {
255 // Read/modify/write the vector, inserting the new element
256 const mlir::Location loc = dst.getVectorPointer().getLoc();
257 const mlir::Value vector =
258 builder.createLoad(loc, dst.getVectorAddress());
259 const mlir::Value newVector = cir::VecInsertOp::create(
260 builder, loc, vector, src.getValue(), dst.getVectorIdx());
261 builder.createStore(loc, newVector, dst.getVectorAddress());
262 return;
263 }
264
265 assert(dst.isBitField() && "Unknown LValue type");
267 return;
268
269 cgm.errorNYI(dst.getPointer().getLoc(),
270 "emitStoreThroughLValue: non-simple lvalue");
271 return;
272 }
273
275
276 assert(src.isScalar() && "Can't emit an aggregate store with this method");
277 emitStoreOfScalar(src.getValue(), dst, isInit);
278}
279
// Build an l-value referring to the global variable `vd`, as used by the
// expression `e`. thread_local, OpenMP declare-target and reference-typed
// globals are still NYI.
// NOTE(review): lines 298 and 312 are missing from this capture — TODO
// confirm against the upstream file.
280static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
281 const VarDecl *vd) {
282 QualType t = e->getType();
283
284 // If it's thread_local, emit a call to its wrapper function instead.
285 if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
286 cgf.cgm.errorNYI(e->getSourceRange(),
287 "emitGlobalVarDeclLValue: thread_local variable");
288
289 // Check if the variable is marked as declare target with link clause in
290 // device codegen.
291 if (cgf.getLangOpts().OpenMP)
292 cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");
293
294 // Traditional LLVM codegen handles thread local separately, CIR handles
295 // as part of getAddrOfGlobalVar.
296 mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);
297
// If the declared memory type differs from the global's pointer type,
// bitcast the address so callers see the expected element type.
299 mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
300 cir::PointerType realPtrTy = cgf.getBuilder().getPointerTo(realVarTy);
301 if (realPtrTy != v.getType())
302 v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);
303
304 CharUnits alignment = cgf.getContext().getDeclAlign(vd);
305 Address addr(v, realVarTy, alignment);
306 LValue lv;
307 if (vd->getType()->isReferenceType())
308 cgf.cgm.errorNYI(e->getSourceRange(),
309 "emitGlobalVarDeclLValue: reference type");
310 else
311 lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
313 return lv;
314}
315
// Store the scalar `value` to memory at `addr`, converting it to its memory
// representation first and routing atomic destinations to emitAtomicStore.
// NOTE(review): lines 332, 340 and 366 are missing from this capture — TODO
// confirm against the upstream file.
316void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
317 bool isVolatile, QualType ty,
318 LValueBaseInfo baseInfo, bool isInit,
319 bool isNontemporal) {
320
321 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
322 // Boolean vectors use `iN` as storage type.
323 if (clangVecTy->isExtVectorBoolType())
324 cgm.errorNYI(addr.getPointer().getLoc(),
325 "emitStoreOfScalar ExtVectorBoolType");
326
327 // Handle vectors of size 3 like size 4 for better performance.
328 const mlir::Type elementType = addr.getElementType();
329 const auto vecTy = cast<cir::VectorType>(elementType);
330
331 // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it upstreamed
333 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
334 cgm.errorNYI(addr.getPointer().getLoc(),
335 "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
336 }
337
// Convert the value from its primary IR type to its load/store type.
338 value = emitToMemory(value, ty);
339
341 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
342 if (ty->isAtomicType() ||
343 (!isInit && isLValueSuitableForInlineAtomic(atomicLValue))) {
344 emitAtomicStore(RValue::get(value), atomicLValue, isInit);
345 return;
346 }
347
348 // Update the alloca with more info on initialization.
349 assert(addr.getPointer() && "expected pointer to exist");
350 auto srcAlloca = addr.getDefiningOp<cir::AllocaOp>();
351 if (currVarDecl && srcAlloca) {
352 const VarDecl *vd = currVarDecl;
353 assert(vd && "VarDecl expected");
// Tag the alloca so later passes know it was explicitly initialized.
354 if (vd->hasInit())
355 srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
356 }
357
358 assert(currSrcLoc && "must pass in source location");
359 builder.createStore(*currSrcLoc, value, addr, isVolatile);
360
361 if (isNontemporal) {
362 cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
363 return;
364 }
365
367}
368
369// TODO: Replace this with a proper TargetInfo function call.
370/// Helper method to check if the underlying ABI is AAPCS
371static bool isAAPCS(const TargetInfo &targetInfo) {
372 return targetInfo.getABI().starts_with("aapcs");
373}
374
// Store the scalar rvalue `src` into the bitfield destination `dst` and
// return the value actually written (via cir.set_bitfield).
// NOTE(review): the signature line (375) is missing from this capture.
// NOTE(review): "useVoaltile" is a pre-existing local-variable typo
// (presumably "useVolatile"); left untouched in this doc-only pass.
376 LValue dst) {
377
378 const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
379 mlir::Type resLTy = convertTypeForMem(dst.getType());
380 Address ptr = dst.getBitFieldAddress();
381
// AAPCS volatile-bitfield semantics only apply when the option is enabled,
// the destination is volatile, and the field has a volatile storage size.
382 bool useVoaltile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
383 dst.isVolatileQualified() &&
384 info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());
385
386 mlir::Value dstAddr = dst.getAddress().getPointer();
387
388 return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr,
389 ptr.getElementType(), src.getValue(), info,
390 dst.isVolatileQualified(), useVoaltile);
391}
392
// Load the value of the bitfield l-value `lv` and return it as an rvalue
// (via cir.get_bitfield).
// NOTE(review): the signature line (393) and line 406 are missing from this
// capture — TODO confirm against the upstream file.
394 const CIRGenBitFieldInfo &info = lv.getBitFieldInfo();
395
396 // Get the output type.
397 mlir::Type resLTy = convertType(lv.getType());
398 Address ptr = lv.getBitFieldAddress();
399
// "useVoaltile" is a pre-existing typo for "useVolatile" (left as-is).
400 bool useVoaltile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
401 isAAPCS(cgm.getTarget());
402
403 mlir::Value field =
404 builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
405 info, lv.isVolatile(), useVoaltile);
407 return RValue::get(field);
408}
409
// Compute the address of the storage unit backing a bitfield member.
// NOTE(review): the signature line (410) and lines 416/420 are missing from
// this capture (420 presumably starts the CharUnits offset computation) —
// TODO confirm against the upstream file.
411 const FieldDecl *field,
412 mlir::Type fieldType,
413 unsigned index) {
414 mlir::Location loc = getLoc(field->getLocation());
415 cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
// Unions address members by their declared field index; records use the
// CIR layout index passed in by the caller.
417 cir::GetMemberOp sea = getBuilder().createGetMember(
418 loc, fieldPtr, base.getPointer(), field->getName(),
419 rec.isUnion() ? field->getFieldIndex() : index);
421 rec.getElementOffset(cgm.getDataLayout().layout, index));
422 return Address(sea, base.getAlignment().alignmentAtOffset(offset));
423}
424
// Build a bitfield l-value for `field`, addressed relative to `base`.
// NOTE(review): the signature line (425) and lines 432, 442, 444 are missing
// from this capture — TODO confirm against the upstream file.
426 const FieldDecl *field) {
427 LValueBaseInfo baseInfo = base.getBaseInfo();
428 const CIRGenRecordLayout &layout =
429 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
430 const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
431
433
434 unsigned idx = layout.getCIRFieldNo(field);
435 Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);
436
// Normalize the element type to the bitfield's storage type if needed.
437 mlir::Location loc = getLoc(field->getLocation());
438 if (addr.getElementType() != info.storageType)
439 addr = builder.createElementBitCast(loc, addr, info.storageType);
440
441 QualType fieldType =
443 // TODO(cir): Support TBAA for bit fields.
445 LValueBaseInfo fieldBaseInfo(baseInfo.getAlignmentSource());
446 return LValue::makeBitfield(addr, info, fieldType, fieldBaseInfo);
447}
448
// Build an l-value for a (possibly reference-typed or bitfield) member
// `field` accessed through the record l-value `base`.
// NOTE(review): the signature line (449) and lines 459, 486, 490, 511 are
// missing from this capture (511 presumably guarded the __weak check) —
// TODO confirm against the upstream file.
450 LValueBaseInfo baseInfo = base.getBaseInfo();
451
452 if (field->isBitField())
453 return emitLValueForBitField(base, field);
454
455 QualType fieldType = field->getType();
456 const RecordDecl *rec = field->getParent();
457 AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
458 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));
460
461 Address addr = base.getAddress();
462 if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
463 if (cgm.getCodeGenOpts().StrictVTablePointers &&
464 classDecl->isDynamicClass()) {
465 cgm.errorNYI(field->getSourceRange(),
466 "emitLValueForField: strict vtable for dynamic class");
467 }
468 }
469
470 unsigned recordCVR = base.getVRQualifiers();
471
// Lambda capture fields have no source name; use the mapping kept by the
// module to recover the display name.
472 llvm::StringRef fieldName = field->getName();
473 unsigned fieldIndex;
474 if (cgm.lambdaFieldToName.count(field))
475 fieldName = cgm.lambdaFieldToName[field];
476
477 if (rec->isUnion())
478 fieldIndex = field->getFieldIndex();
479 else {
480 const CIRGenRecordLayout &layout =
481 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
482 fieldIndex = layout.getCIRFieldNo(field);
483 }
484
485 addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);
487
488 // If this is a reference field, load the reference right now.
489 if (fieldType->isReferenceType()) {
491 LValue refLVal = makeAddrLValue(addr, fieldType, fieldBaseInfo);
492 if (recordCVR & Qualifiers::Volatile)
493 refLVal.getQuals().addVolatile();
494 addr = emitLoadOfReference(refLVal, getLoc(field->getSourceRange()),
495 &fieldBaseInfo);
496
497 // Qualifiers on the struct don't apply to the referencee.
498 recordCVR = 0;
499 fieldType = fieldType->getPointeeType();
500 }
501
502 if (field->hasAttr<AnnotateAttr>()) {
503 cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
504 return LValue();
505 }
506
507 LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
508 lv.getQuals().addCVRQualifiers(recordCVR);
509
510 // __weak attribute on a field is ignored.
512 cgm.errorNYI(field->getSourceRange(),
513 "emitLValueForField: __weak attribute");
514 return LValue();
515 }
516
517 return lv;
518}
519
// Like emitLValueForField, but used when *initializing* a field: for a
// reference-typed member it returns the address of the reference slot itself
// rather than loading through it.
// NOTE(review): the signature line (520) and line 544 are missing from this
// capture — TODO confirm against the upstream file.
521 LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName) {
522 QualType fieldType = field->getType();
523
524 if (!fieldType->isReferenceType())
525 return emitLValueForField(base, field);
526
527 const CIRGenRecordLayout &layout =
528 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
529 unsigned fieldIndex = layout.getCIRFieldNo(field);
530
531 Address v =
532 emitAddrOfFieldStorage(base.getAddress(), field, fieldName, fieldIndex);
533
534 // Make sure that the address is pointing to the right type.
535 mlir::Type memTy = convertTypeForMem(fieldType);
536 v = builder.createElementBitCast(getLoc(field->getSourceRange()), v, memTy);
537
538 // TODO: Generate TBAA information that describes this access as a structure
539 // member access and not just an access to an object of the field's type. This
540 // should be similar to what we do in EmitLValueForField().
541 LValueBaseInfo baseInfo = base.getBaseInfo();
542 AlignmentSource fieldAlignSource = baseInfo.getAlignmentSource();
543 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(fieldAlignSource));
545 return makeAddrLValue(v, fieldType, fieldBaseInfo);
546}
547
548/// Converts a scalar value from its primary IR type (as returned
549/// by ConvertType) to its load/store type.
550mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
551 if (auto *atomicTy = ty->getAs<AtomicType>())
552 ty = atomicTy->getValueType();
553
554 if (ty->isExtVectorBoolType()) {
555 cgm.errorNYI("emitToMemory: extVectorBoolType");
556 }
557
558 // Unlike in classic codegen CIR, bools are kept as `cir.bool` and BitInts are
559 // kept as `cir.int<N>` until further lowering
560
561 return value;
562}
563
// Inverse of emitToMemory: convert a loaded value from its memory
// representation back to its primary IR type. In CIR this is currently a
// no-op except for the NYI packed-bool-vector case.
// NOTE(review): line 568 — presumably the `if` condition guarding the
// errorNYI below — is missing from this capture; TODO confirm upstream.
564mlir::Value CIRGenFunction::emitFromMemory(mlir::Value value, QualType ty) {
565 if (auto *atomicTy = ty->getAs<AtomicType>())
566 ty = atomicTy->getValueType();
567
569 cgm.errorNYI("emitFromMemory: PackedVectorBoolType");
570 }
571
572 return value;
573}
574
575void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
576 bool isInit) {
577 if (lvalue.getType()->isConstantMatrixType()) {
578 assert(0 && "NYI: emitStoreOfScalar constant matrix type");
579 return;
580 }
581
582 emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
583 lvalue.getType(), lvalue.getBaseInfo(), isInit,
584 /*isNontemporal=*/false);
585}
586
// Load a scalar of type `ty` from `addr`, honoring volatility. Atomic loads,
// boolean-representation fixups, and ext-vector-bool storage are still NYI.
// NOTE(review): lines 603, 609 and 617 are missing from this capture — TODO
// confirm against the upstream file.
587mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
588 QualType ty, SourceLocation loc,
589 LValueBaseInfo baseInfo) {
590 // Traditional LLVM codegen handles thread local separately, CIR handles
591 // as part of getAddrOfGlobalVar (GetGlobalOp).
592 mlir::Type eltTy = addr.getElementType();
593
594 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
595 if (clangVecTy->isExtVectorBoolType()) {
596 cgm.errorNYI(loc, "emitLoadOfScalar: ExtVectorBoolType");
597 return nullptr;
598 }
599
600 const auto vecTy = cast<cir::VectorType>(eltTy);
601
602 // Handle vectors of size 3 like size 4 for better performance.
604 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
605 cgm.errorNYI(addr.getPointer().getLoc(),
606 "emitLoadOfScalar Vec3 & PreserveVec3Type disabled");
607 }
608
610 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
611 if (ty->isAtomicType() || isLValueSuitableForInlineAtomic(atomicLValue))
612 cgm.errorNYI("emitLoadOfScalar: load atomic");
613
614 if (mlir::isa<cir::VoidType>(eltTy))
615 cgm.errorNYI(loc, "emitLoadOfScalar: void type");
616
618
619 mlir::Value loadOp = builder.createLoad(getLoc(loc), addr, isVolatile);
620 if (!ty->isBooleanType() && ty->hasBooleanRepresentation())
621 cgm.errorNYI("emitLoadOfScalar: boolean type with boolean representation");
622
623 return loadOp;
624}
625
// Convenience overload: load a scalar through a simple l-value by forwarding
// to the Address-based emitLoadOfScalar.
// NOTE(review): the signature line (626) and lines 628-629 are missing from
// this capture — TODO confirm against the upstream file.
627 SourceLocation loc) {
630 return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
631 lvalue.getType(), loc, lvalue.getBaseInfo());
632}
633
634/// Given an expression that represents a value lvalue, this
635/// method emits the address of the lvalue, then loads the result as an rvalue,
636/// returning the rvalue.
// NOTE(review): the signature line (637) and line 655 (presumably the
// ext-vector-element return) are missing from this capture — TODO confirm.
638 assert(!lv.getType()->isFunctionType());
639 assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");
640
641 if (lv.isBitField())
642 return emitLoadOfBitfieldLValue(lv, loc);
643
644 if (lv.isSimple())
645 return RValue::get(emitLoadOfScalar(lv, loc));
646
// Vector element: load the whole vector, then extract the single lane.
647 if (lv.isVectorElt()) {
648 const mlir::Value load =
649 builder.createLoad(getLoc(loc), lv.getVectorAddress());
650 return RValue::get(cir::VecExtractOp::create(builder, getLoc(loc), load,
651 lv.getVectorIdx()));
652 }
653
654 if (lv.isExtVectorElt())
656
657 cgm.errorNYI(loc, "emitLoadOfLValue");
658 return RValue::get(nullptr);
659}
660
661int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx,
662 const mlir::ArrayAttr elts) {
663 auto elt = mlir::cast<mlir::IntegerAttr>(elts[idx]);
664 return elt.getInt();
665}
666
667// If this is a reference to a subset of the elements of a vector, create an
668// appropriate shufflevector.
// NOTE(review): the signature line (669) and line 693 (presumably the
// declaration of `mask`) are missing from this capture — TODO confirm.
670 mlir::Location loc = lv.getExtVectorPointer().getLoc();
671 mlir::Value vec = builder.createLoad(loc, lv.getExtVectorAddress());
672
673 // HLSL allows treating scalars as one-element vectors. Converting the scalar
674 // IR value to a vector here allows the rest of codegen to behave as normal.
675 if (getLangOpts().HLSL && !mlir::isa<cir::VectorType>(vec.getType())) {
676 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: HLSL");
677 return {};
678 }
679
680 const mlir::ArrayAttr elts = lv.getExtVectorElts();
681
682 // If the result of the expression is a non-vector type, we must be extracting
683 // a single element. Just codegen as an extractelement.
684 const auto *exprVecTy = lv.getType()->getAs<clang::VectorType>();
685 if (!exprVecTy) {
686 int64_t indexValue = getAccessedFieldNo(0, elts);
687 cir::ConstantOp index =
688 builder.getConstInt(loc, builder.getSInt64Ty(), indexValue);
689 return RValue::get(cir::VecExtractOp::create(builder, loc, vec, index));
690 }
691
692 // Always use shuffle vector to try to retain the original program structure
694 for (auto i : llvm::seq<unsigned>(0, exprVecTy->getNumElements()))
695 mask.push_back(getAccessedFieldNo(i, elts));
696
697 cir::VecShuffleOp resultVec = builder.createVecShuffle(loc, vec, mask);
698 if (lv.getType()->isExtVectorBoolType()) {
699 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: ExtVectorBoolType");
700 return {};
701 }
702
703 return RValue::get(resultVec);
704}
705
// Build an l-value for a pointer-to-data-member access (`.*` / `->*`).
// NOTE(review): line 707 (the rest of the signature) and line 722 are
// missing from this capture — TODO confirm against the upstream file.
706LValue
708 assert((e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) &&
709 "unexpected binary operator opcode");
710
// `.*` takes the address of an l-value LHS; `->*` evaluates the LHS as a
// pointer with alignment info.
711 Address baseAddr = Address::invalid();
712 if (e->getOpcode() == BO_PtrMemD)
713 baseAddr = emitLValue(e->getLHS()).getAddress();
714 else
715 baseAddr = emitPointerWithAlignment(e->getLHS());
716
717 const auto *memberPtrTy = e->getRHS()->getType()->castAs<MemberPointerType>();
718
719 mlir::Value memberPtr = emitScalarExpr(e->getRHS());
720
721 LValueBaseInfo baseInfo;
723 Address memberAddr = emitCXXMemberDataPointerAddress(e, baseAddr, memberPtr,
724 memberPtrTy, &baseInfo);
725
726 return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo);
727}
728
729/// Generates lvalue for partial ext_vector access.
// NOTE(review): the signature line (730) is missing from this capture, so
// the function's exact name is not visible here; it returns the Address of
// the first accessed element of the ext-vector — TODO confirm upstream.
731 mlir::Location loc) {
732 Address vectorAddress = lv.getExtVectorAddress();
733 QualType elementTy = lv.getType()->castAs<VectorType>()->getElementType();
734 mlir::Type vectorElementTy = cgm.getTypes().convertType(elementTy);
// Reinterpret the vector storage as a pointer to its element type so a
// single element can be addressed.
735 Address castToPointerElement =
736 vectorAddress.withElementType(builder, vectorElementTy);
737
738 mlir::ArrayAttr extVecElts = lv.getExtVectorElts();
739 unsigned idx = getAccessedFieldNo(0, extVecElts);
740 mlir::Value idxValue =
741 builder.getConstInt(loc, mlir::cast<cir::IntType>(ptrDiffTy), idx);
742
743 mlir::Value elementValue = builder.getArrayElement(
744 loc, loc, castToPointerElement.getPointer(), vectorElementTy, idxValue,
745 /*shouldDecay=*/false);
746
// Alignment of the element is the storage alignment offset by the element's
// byte position within the vector.
747 const CharUnits eltSize = getContext().getTypeSizeInChars(elementTy);
748 const CharUnits alignment =
749 castToPointerElement.getAlignment().alignmentAtOffset(idx * eltSize);
750 return Address(elementValue, vectorElementTy, alignment);
751}
752
// Return the cir.func for the function declaration `gd`.
// NOTE(review): line 754 is missing from this capture — TODO confirm
// against the upstream file.
753static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
755 return cgm.getAddrOfFunction(gd);
756}
757
// Emit an l-value for a lambda-captured field, delegating to
// emitLValueForLambdaField.
// NOTE(review): the signature line (758) is missing from this capture —
// TODO confirm against the upstream file.
759 mlir::Value thisValue) {
760 return cgf.emitLValueForLambdaField(fd, thisValue);
761}
762
763/// Given that we are currently emitting a lambda, emit an l-value for
764/// one of its members.
765///
// NOTE(review): the signature line (766) and line 780 (presumably the
// lambda record type expression) are missing from this capture — TODO
// confirm against the upstream file.
767 mlir::Value thisValue) {
768 bool hasExplicitObjectParameter = false;
769 const auto *methD = dyn_cast_if_present<CXXMethodDecl>(curCodeDecl);
770 LValue lambdaLV;
771 if (methD) {
772 hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction();
773 assert(methD->getParent()->isLambda());
774 assert(methD->getParent() == field->getParent());
775 }
// C++23 explicit-object ("deducing this") lambdas are not handled yet.
776 if (hasExplicitObjectParameter) {
777 cgm.errorNYI(field->getSourceRange(), "ExplicitObjectMemberFunction");
778 } else {
779 QualType lambdaTagType =
781 lambdaLV = makeNaturalAlignAddrLValue(thisValue, lambdaTagType);
782 }
783 return emitLValueForField(lambdaLV, field);
784}
785
789
// Build an l-value referring to the function declared by `gd`, bitcasting
// the cir.get_global result when the declared type and emitted function
// type disagree (e.g. for unprototyped functions).
// NOTE(review): lines 797 and 813 are missing from this capture — TODO
// confirm against the upstream file.
790static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
791 GlobalDecl gd) {
792 const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
793 cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
794 mlir::Location loc = cgf.getLoc(e->getSourceRange());
795 CharUnits align = cgf.getContext().getDeclAlign(fd);
796
798
799 mlir::Type fnTy = funcOp.getFunctionType();
800 mlir::Type ptrTy = cir::PointerType::get(fnTy);
801 mlir::Value addr = cir::GetGlobalOp::create(cgf.getBuilder(), loc, ptrTy,
802 funcOp.getSymName());
803
804 if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
805 fnTy = cgf.convertType(fd->getType());
806 ptrTy = cir::PointerType::get(fnTy);
807
808 addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
809 cir::CastKind::bitcast, addr);
810 }
811
812 return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
814}
815
816/// Determine whether we can emit a reference to \p vd from the current
817/// context, despite not necessarily having seen an odr-use of the variable in
818/// this context.
819/// TODO(cir): This could be shared with classic codegen.
// NOTE(review): the signature line (820) and line 827 (presumably the
// enclosing-scope condition for the first early return) are missing from
// this capture — TODO confirm against the upstream file.
821 const DeclRefExpr *e,
822 const VarDecl *vd) {
823 // For a variable declared in an enclosing scope, do not emit a spurious
824 // reference even if we have a capture, as that will emit an unwarranted
825 // reference to our capture state, and will likely generate worse code than
826 // emitting a local copy.
828 return false;
829
830 // For a local declaration declared in this function, we can always reference
831 // it even if we don't have an odr-use.
832 if (vd->hasLocalStorage()) {
833 return vd->getDeclContext() ==
834 dyn_cast_or_null<DeclContext>(cgf.curCodeDecl);
835 }
836
837 // For a global declaration, we can emit a reference to it if we know
838 // for sure that we are able to emit a definition of it.
839 vd = vd->getDefinition(cgf.getContext());
840 if (!vd)
841 return false;
842
843 // Don't emit a spurious reference if it might be to a variable that only
844 // exists on a different device / target.
845 // FIXME: This is unnecessarily broad. Check whether this would actually be a
846 // cross-target reference.
847 if (cgf.getLangOpts().OpenMP || cgf.getLangOpts().CUDA ||
848 cgf.getLangOpts().OpenCL) {
849 return false;
850 }
851
852 // We can emit a spurious reference only if the linkage implies that we'll
853 // be emitting a non-interposable symbol that will be retained until link
854 // time.
855 switch (cgf.cgm.getCIRLinkageVarDefinition(vd, /*IsConstant=*/false)) {
856 case cir::GlobalLinkageKind::ExternalLinkage:
857 case cir::GlobalLinkageKind::LinkOnceODRLinkage:
858 case cir::GlobalLinkageKind::WeakODRLinkage:
859 case cir::GlobalLinkageKind::InternalLinkage:
860 case cir::GlobalLinkageKind::PrivateLinkage:
861 return true;
862 default:
863 return false;
864 }
865}
866
// Build an l-value for a DeclRefExpr: handles constant-folded non-odr uses,
// lambda captures, global and local variables, structured bindings, and
// function references.
// NOTE(review): the signature line (867) and lines 909, 913-914, 920-925,
// 946-948, 959-960, 972 are missing from this capture — several conditions
// and the reference-drilling branch are therefore not visible here; TODO
// confirm against the upstream file.
868 const NamedDecl *nd = e->getDecl();
869 QualType ty = e->getType();
870
871 assert(e->isNonOdrUse() != NOUR_Unevaluated &&
872 "should not emit an unevaluated operand");
873
874 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
875 // Global Named registers access via intrinsics only
876 if (vd->getStorageClass() == SC_Register && vd->hasAttr<AsmLabelAttr>() &&
877 !vd->isLocalVarDecl()) {
878 cgm.errorNYI(e->getSourceRange(),
879 "emitDeclRefLValue: Global Named registers access");
880 return LValue();
881 }
882
// A non-odr constant use can be emitted as a constant value spilled to an
// unnamed global instead of referencing the variable itself.
883 if (e->isNonOdrUse() == NOUR_Constant &&
884 (vd->getType()->isReferenceType() ||
885 !canEmitSpuriousReferenceToVariable(*this, e, vd))) {
886 vd->getAnyInitializer(vd);
887 mlir::Attribute val = ConstantEmitter(*this).emitAbstract(
888 e->getLocation(), *vd->evaluateValue(), vd->getType());
889 assert(val && "failed to emit constant expression");
890
891 Address addr = Address::invalid();
892 if (!vd->getType()->isReferenceType()) {
893 // Spill the constant value to a global.
894 addr = cgm.createUnnamedGlobalFrom(*vd, val,
895 getContext().getDeclAlign(vd));
896 mlir::Type varTy = getTypes().convertTypeForMem(vd->getType());
897 auto ptrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
898 if (ptrTy.getPointee() != varTy) {
899 addr = addr.withElementType(builder, varTy);
900 }
901 } else {
902 cgm.errorNYI(e->getSourceRange(),
903 "emitDeclRefLValue: non-odr reference type");
904 }
905 return makeAddrLValue(addr, ty, AlignmentSource::Decl);
906 }
907
908 // Check for captured variables.
910 vd = vd->getCanonicalDecl();
911 if (FieldDecl *fd = lambdaCaptureFields.lookup(vd))
912 return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
915 }
916 }
917
918 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
919 // Checks for omitted feature handling
926
927 // Check if this is a global variable
928 if (vd->hasLinkage() || vd->isStaticDataMember())
929 return emitGlobalVarDeclLValue(*this, e, vd);
930
931 Address addr = Address::invalid();
932
933 // The variable should generally be present in the local decl map.
934 auto iter = localDeclMap.find(vd);
935 if (iter != localDeclMap.end()) {
936 addr = iter->second;
937 } else {
938 // Otherwise, it might be static local we haven't emitted yet for some
939 // reason; most likely, because it's in an outer function.
940 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: static local");
941 }
942
943 // Drill into reference types.
944 LValue lv =
945 vd->getType()->isReferenceType()
949
950 // Statics are defined as globals, so they are not include in the function's
951 // symbol table.
952 assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
953 "non-static locals should be already mapped");
954
955 return lv;
956 }
957
958 if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
961 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
962 return LValue();
963 }
964 return emitLValue(bd->getBinding());
965 }
966
967 if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
968 LValue lv = emitFunctionDeclLValue(*this, e, fd);
969
970 // Emit debuginfo for the function declaration if the target wants to.
971 if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
973
974 return lv;
975 }
976
977 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
978 return LValue();
979}
980
// NOTE(review): doxygen-listing fragment — the leading numerals are line-number
// artifacts and some source lines (including the function signature) are
// elided. Code kept byte-identical; comments only.
// Purpose (from visible code): convert expression `e` to a boolean value,
// routing scalars through emitScalarConversion and flagging unsupported cases.
 982 QualType boolTy = getContext().BoolTy;
 983 SourceLocation loc = e->getExprLoc();
 984
// Member-pointer-to-bool conversion is not implemented yet; report and
// return a dummy value so codegen can continue.
 986 if (e->getType()->getAs<MemberPointerType>()) {
 987 cgm.errorNYI(e->getSourceRange(),
 988 "evaluateExprAsBool: member pointer type");
 989 return createDummyValue(getLoc(loc), boolTy);
 990 }
 991
// Ordinary (non-complex) scalars: emit and convert to bool.
 993 if (!e->getType()->isAnyComplexType())
 994 return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);
 995
// NOTE(review): the call whose argument list ends here is on an elided line
// (presumably the complex-to-bool path) — confirm against upstream source.
 997 loc);
 998 }
999
// NOTE(review): doxygen-listing fragment — leading numerals are line-number
// artifacts and several lines (including the signature, presumably
// CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e)) are elided.
// Code kept byte-identical; comments only.
// Purpose (from visible code): produce an LValue for the lvalue-producing
// unary operators: __extension__, *deref, __real/__imag, and pre-inc/dec.
 1001 UnaryOperatorKind op = e->getOpcode();
 1002
 1003 // __extension__ doesn't affect lvalue-ness.
 1004 if (op == UO_Extension)
 1005 return emitLValue(e->getSubExpr());
 1006
 1007 switch (op) {
 1008 case UO_Deref: {
// NOTE(review): the declaration of `t` is on an elided line.
 1010 assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
 1011
 1013 LValueBaseInfo baseInfo;
 1014 Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);
 1015
 1016 // Tag 'load' with deref attribute.
 1017 // FIXME: This misses some dereference cases and has problematic
 1018 // interactions with other operators.
 1019 if (auto loadOp = addr.getDefiningOp<cir::LoadOp>())
 1020 loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()))
 1021
 1022 LValue lv = makeAddrLValue(addr, t, baseInfo);
 1025 return lv;
 1026 }
 1027 case UO_Real:
 1028 case UO_Imag: {
 1029 LValue lv = emitLValue(e->getSubExpr());
 1030 assert(lv.isSimple() && "real/imag on non-ordinary l-value");
 1031
 1032 // __real is valid on scalars. This is a faster way of testing that.
 1033 // __imag can only produce an rvalue on scalars.
 1034 if (e->getOpcode() == UO_Real &&
 1035 !mlir::isa<cir::ComplexType>(lv.getAddress().getElementType())) {
 1036 assert(e->getSubExpr()->getType()->isArithmeticType());
 1037 return lv;
 1038 }
 1039
// NOTE(review): the declaration of `exprTy` is on an elided line.
 1041 QualType elemTy = exprTy->castAs<clang::ComplexType>()->getElementType();
 1042 mlir::Location loc = getLoc(e->getExprLoc());
// Project out a pointer to the requested half (real or imaginary part).
 1043 Address component =
 1044 e->getOpcode() == UO_Real
 1045 ? builder.createComplexRealPtr(loc, lv.getAddress())
 1046 : builder.createComplexImagPtr(loc, lv.getAddress());
 1048 LValue elemLV = makeAddrLValue(component, elemTy);
 1049 elemLV.getQuals().addQualifiers(lv.getQuals());
 1050 return elemLV;
 1051 }
 1052 case UO_PreInc:
 1053 case UO_PreDec: {
 1054 cir::UnaryOpKind kind =
 1055 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
 1056 LValue lv = emitLValue(e->getSubExpr());
 1057
 1058 assert(e->isPrefix() && "Prefix operator in unexpected state!");
 1059
// Complex and scalar operands take distinct inc/dec paths.
 1060 if (e->getType()->isAnyComplexType()) {
 1061 emitComplexPrePostIncDec(e, lv, kind, /*isPre=*/true);
 1062 } else {
 1063 emitScalarPrePostIncDec(e, lv, kind, /*isPre=*/true);
 1064 }
 1065
 1066 return lv;
 1067 }
 1068 case UO_Extension:
 1069 llvm_unreachable("UnaryOperator extension should be handled above!");
 1070 case UO_Plus:
 1071 case UO_Minus:
 1072 case UO_Not:
 1073 case UO_LNot:
 1074 case UO_AddrOf:
 1075 case UO_PostInc:
 1076 case UO_PostDec:
 1077 case UO_Coawait:
 1078 llvm_unreachable("UnaryOperator of non-lvalue kind!");
 1079 }
 1080 llvm_unreachable("Unknown unary operator kind!");
 1081}
1082
1083/// If the specified expr is a simple decay from an array to pointer,
1084/// return the array subexpression.
1085/// FIXME: this could be abstracted into a common AST helper.
1086static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
1087 // If this isn't just an array->pointer decay, bail out.
1088 const auto *castExpr = dyn_cast<CastExpr>(e);
1089 if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
1090 return nullptr;
1091
1092 // If this is a decay from variable width array, bail out.
1093 const Expr *subExpr = castExpr->getSubExpr();
1094 if (subExpr->getType()->isVariableArrayType())
1095 return nullptr;
1096
1097 return subExpr;
1098}
1099
1100static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
1101 // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr?
1102 if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
1103 return constantOp.getValueAttr<cir::IntAttr>();
1104 return {};
1105}
1106
1107static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
1108 CharUnits eltSize) {
1109 // If we have a constant index, we can use the exact offset of the
1110 // element we're accessing.
1111 if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
1112 const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
1113 return arrayAlign.alignmentAtOffset(offset);
1114 }
1115 // Otherwise, use the worst-case alignment for any element.
1116 return arrayAlign.alignmentOfArrayElement(eltSize);
1117}
1118
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the first line of the signature (taking the ASTContext, presumably named
// `astContext`) is elided. Code kept byte-identical; comments only.
// Purpose (from visible code): peel nested variable-length-array levels off
// `vla` until a non-VLA element type remains, and return that type.
 1120 const VariableArrayType *vla) {
 1121 QualType eltType;
 1122 do {
 1123 eltType = vla->getElementType();
 1124 } while ((vla = astContext.getAsVariableArrayType(eltType)));
 1125 return eltType;
 1126}
1127
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the first line of the signature (the return type and the CIRGenFunction
// parameter, presumably `cgf`) is elided. Code kept byte-identical.
// Purpose (from visible code): compute the address of array element `idx`
// starting from `ptr` of element type `eltTy`, delegating to the builder's
// getArrayElement helper; `shouldDecay` is forwarded unchanged.
 1129 mlir::Location beginLoc,
 1130 mlir::Location endLoc, mlir::Value ptr,
 1131 mlir::Type eltTy, mlir::Value idx,
 1132 bool shouldDecay) {
 1133 CIRGenModule &cgm = cgf.getCIRGenModule();
 1134 // TODO(cir): LLVM codegen emits in bound gep check here, is there anything
 1135 // that would enhance tracking this later in CIR?
 1137 return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
 1138 shouldDecay);
 1139}
1140
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the first signature line (return type `Address` and the CIRGenFunction
// parameter, presumably `cgf`) is elided. Code kept byte-identical.
// Purpose (from visible code): Address-returning overload — computes both the
// element pointer (via the value overload above) and the best-known
// alignment of the element, packaging them as an Address.
 1142 mlir::Location beginLoc,
 1143 mlir::Location endLoc, Address addr,
 1144 QualType eltType, mlir::Value idx,
 1145 mlir::Location loc, bool shouldDecay) {
 1146
 1147 // Determine the element size of the statically-sized base. This is
 1148 // the thing that the indices are expressed in terms of.
 1149 if (const VariableArrayType *vla =
 1150 cgf.getContext().getAsVariableArrayType(eltType)) {
 1151 eltType = getFixedSizeElementType(cgf.getContext(), vla);
 1152 }
 1153
 1154 // We can use that to compute the best alignment of the element.
 1155 const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
 1156 const CharUnits eltAlign =
 1157 getArrayElementAlign(addr.getAlignment(), idx, eltSize);
 1158
 1160 const mlir::Value eltPtr =
 1161 emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
 1162 addr.getElementType(), idx, shouldDecay);
 1163 const mlir::Type elementType = cgf.convertTypeForMem(eltType);
 1164 return Address(eltPtr, elementType, eltAlign);
 1165}
1166
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// several lines (including the rest of the signature, presumably
// CIRGenFunction::emitArraySubscriptExpr(const ArraySubscriptExpr *e)) are
// elided. Code kept byte-identical; comments only.
// Purpose (from visible code): produce an LValue for e[idx], dispatching on
// the base: vector element, ext-vector, VLA, simple array decay, or plain
// pointer arithmetic.
 1167LValue
 1169 if (e->getType()->getAs<ObjCObjectType>()) {
 1170 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
 1172 }
 1173
 1174 // The index must always be an integer, which is not an aggregate. Emit it
 1175 // in lexical order (this complexity is, sadly, required by C++17).
 1176 assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
 1177 "index was neither LHS nor RHS");
 1178
 1179 auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
 1180 const mlir::Value idx = emitScalarExpr(e->getIdx());
 1181
 1182 // Extend or truncate the index type to 32 or 64-bits.
 1183 auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
 1184 if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
 1185 cgm.errorNYI(e->getSourceRange(),
 1186 "emitArraySubscriptExpr: index type cast");
 1187 return idx;
 1188 };
 1189
 1190 // If the base is a vector type, then we are forming a vector element
 1191 // with this subscript.
 1192 if (e->getBase()->getType()->isSubscriptableVectorType() &&
 1194 const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
 1195 const LValue lv = emitLValue(e->getBase());
 1196 return LValue::makeVectorElt(lv.getAddress(), idx, e->getBase()->getType(),
 1197 lv.getBaseInfo());
 1198 }
 1199
 1200 // The HLSL runtime handles subscript expressions on global resource arrays
 1201 // and objects with HLSL buffer layouts.
 1202 if (getLangOpts().HLSL) {
 1203 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: HLSL");
 1204 return {};
 1205 }
 1206
 1207 mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
 1208
 1209 // Handle the extvector case we ignored above.
// NOTE(review): the guarding condition for this ext-vector branch is on an
// elided line.
 1211 const LValue lv = emitLValue(e->getBase());
 1212 Address addr = emitExtVectorElementLValue(lv, cgm.getLoc(e->getExprLoc()));
 1213
 1214 QualType elementType = lv.getType()->castAs<VectorType>()->getElementType();
 1215 addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
 1216 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
 1217 idx, cgm.getLoc(e->getExprLoc()),
 1218 /*shouldDecay=*/false);
 1219
 1220 return makeAddrLValue(addr, elementType, lv.getBaseInfo());
 1221 }
 1222
 1223 if (const VariableArrayType *vla =
 1224 getContext().getAsVariableArrayType(e->getType())) {
 1225 // The base must be a pointer, which is not an aggregate. Emit
 1226 // it. It needs to be emitted first in case it's what captures
 1227 // the VLA bounds.
 1229
 1230 // The element count here is the total number of non-VLA elements.
 1231 mlir::Value numElements = getVLASize(vla).numElts;
 1232 idx = builder.createIntCast(idx, numElements.getType());
 1233
 1234 // Effectively, the multiply by the VLA size is part of the GEP.
 1235 // GEP indexes are signed, and scaling an index isn't permitted to
 1236 // signed-overflow, so we use the same semantics for our explicit
 1237 // multiply. We suppress this if overflow is not undefined behavior.
 1238 OverflowBehavior overflowBehavior = getLangOpts().PointerOverflowDefined
 1241 idx = builder.createMul(cgm.getLoc(e->getExprLoc()), idx, numElements,
 1242 overflowBehavior);
 1243
 1244 addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
 1245 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
 1246 idx, cgm.getLoc(e->getExprLoc()),
 1247 /*shouldDecay=*/false);
 1248
 1249 return makeAddrLValue(addr, vla->getElementType(), LValueBaseInfo());
 1250 }
 1251
// Simple array-to-pointer decay: index relative to the array lvalue so its
// alignment propagates to the element.
 1252 if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
 1253 LValue arrayLV;
 1254 if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
 1255 arrayLV = emitArraySubscriptExpr(ase);
 1256 else
 1257 arrayLV = emitLValue(array);
 1258
 1259 // Propagate the alignment from the array itself to the result.
 1260 const Address addr = emitArraySubscriptPtr(
 1261 *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
 1262 arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
 1263 /*shouldDecay=*/true);
 1264
 1265 const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
 1266
 1267 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
 1268 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
 1269 }
 1270
 1271 return lv;
 1272 }
 1273
 1274 // The base must be a pointer; emit it with an estimate of its alignment.
 1275 assert(e->getBase()->getType()->isPointerType() &&
 1276 "The base must be a pointer");
 1277
 1278 LValueBaseInfo eltBaseInfo;
 1279 const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
 1280 // Propagate the alignment from the array itself to the result.
 1281 const Address addxr = emitArraySubscriptPtr(
 1282 *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
 1283 e->getType(), idx, cgm.getLoc(e->getExprLoc()),
 1284 /*shouldDecay=*/false);
 1285
 1286 const LValue lv = LValue::makeAddr(addxr, e->getType(), eltBaseInfo);
 1287
 1288 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
 1289 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
 1290 }
 1291
 1292 return lv;
 1293}
1294
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the signature line (presumably
// CIRGenFunction::emitExtVectorElementExpr(const ExtVectorElementExpr *e)) is
// elided. Code kept byte-identical; comments only.
// Purpose (from visible code): build an ext-vector-element LValue (e.g. v.xy)
// from a vector lvalue, a pointer-to-vector, or a vector rvalue spilled to a
// temporary.
 1296 // Emit the base vector as an l-value.
 1297 LValue base;
 1298
 1299 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
 1300 if (e->isArrow()) {
 1301 // If it is a pointer to a vector, emit the address and form an lvalue with
 1302 // it.
 1303 LValueBaseInfo baseInfo;
 1304 Address ptr = emitPointerWithAlignment(e->getBase(), &baseInfo);
// NOTE(review): the initializer of `clangPtrTy` is on an elided line.
 1305 const auto *clangPtrTy =
 1307 base = makeAddrLValue(ptr, clangPtrTy->getPointeeType(), baseInfo);
 1308 base.getQuals().removeObjCGCAttr();
 1309 } else if (e->getBase()->isGLValue()) {
 1310 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
 1311 // emit the base as an lvalue.
 1312 assert(e->getBase()->getType()->isVectorType());
 1313 base = emitLValue(e->getBase());
 1314 } else {
 1315 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
 1316 assert(e->getBase()->getType()->isVectorType() &&
 1317 "Result must be a vector");
 1318 mlir::Value vec = emitScalarExpr(e->getBase());
 1319
 1320 // Store the vector to memory (because LValue wants an address).
 1321 QualType baseTy = e->getBase()->getType();
 1322 Address vecMem = createMemTemp(baseTy, vec.getLoc(), "tmp");
 1323 if (!getLangOpts().HLSL && baseTy->isExtVectorBoolType()) {
 1324 cgm.errorNYI(e->getSourceRange(),
 1325 "emitExtVectorElementExpr: ExtVectorBoolType & !HLSL");
 1326 return {};
 1327 }
 1328 builder.createStore(vec.getLoc(), vec, vecMem);
 1329 base = makeAddrLValue(vecMem, baseTy, AlignmentSource::Decl);
 1330 }
 1331
// NOTE(review): the initializer of `type` is on an elided line.
 1332 QualType type =
 1334
 1335 // Encode the element access list into a vector of unsigned indices.
// NOTE(review): the declaration of `indices` is on an elided line.
 1337 e->getEncodedElementAccess(indices);
 1338
 1339 if (base.isSimple()) {
 1340 SmallVector<int64_t> attrElts(indices.begin(), indices.end());
 1341 mlir::ArrayAttr elts = builder.getI64ArrayAttr(attrElts);
 1342 return LValue::makeExtVectorElt(base.getAddress(), elts, type,
 1343 base.getBaseInfo());
 1344 }
 1345
 1346 cgm.errorNYI(e->getSourceRange(),
 1347 "emitExtVectorElementExpr: isSimple is false");
 1348 return {};
 1349}
1350
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the first signature line (presumably LValue
// CIRGenFunction::emitStringLiteralLValue(const StringLiteral *e, ...)) is
// elided. Code kept byte-identical; comments only.
// Purpose (from visible code): materialize the global backing a string
// literal and wrap its address (with the global's recorded alignment) in an
// LValue.
 1352 llvm::StringRef name) {
 1353 cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e, name);
 1354 assert(globalOp.getAlignment() && "expected alignment for string literal");
 1355 unsigned align = *(globalOp.getAlignment());
 1356 mlir::Value addr =
 1357 builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
 1358 return makeAddrLValue(
 1359 Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
// NOTE(review): the trailing makeAddrLValue arguments are on an elided line.
 1361}
1362
 1363/// Casts are never lvalues unless that cast is to a reference type. If the cast
 1364/// is to a reference, we can have the usual lvalue result, otherwise if a cast
 1365/// is needed by the code generator in an lvalue context, then it must mean that
 1366/// we need the address of an aggregate in order to access one of its members.
 1367/// This can happen for all the reasons that casts are permitted with aggregate
 1368/// result, including noop aggregate casts, and cast from scalar to union.
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the signature line (presumably
// LValue CIRGenFunction::emitCastLValue(const CastExpr *e)) is elided.
// Code kept byte-identical; comments only.
 1370 switch (e->getCastKind()) {
// These cast kinds never produce lvalues in CIRGen.
 1371 case CK_ToVoid:
 1372 case CK_BitCast:
 1373 case CK_LValueToRValueBitCast:
 1374 case CK_ArrayToPointerDecay:
 1375 case CK_FunctionToPointerDecay:
 1376 case CK_NullToMemberPointer:
 1377 case CK_NullToPointer:
 1378 case CK_IntegralToPointer:
 1379 case CK_PointerToIntegral:
 1380 case CK_PointerToBoolean:
 1381 case CK_IntegralCast:
 1382 case CK_BooleanToSignedIntegral:
 1383 case CK_IntegralToBoolean:
 1384 case CK_IntegralToFloating:
 1385 case CK_FloatingToIntegral:
 1386 case CK_FloatingToBoolean:
 1387 case CK_FloatingCast:
 1388 case CK_FloatingRealToComplex:
 1389 case CK_FloatingComplexToReal:
 1390 case CK_FloatingComplexToBoolean:
 1391 case CK_FloatingComplexCast:
 1392 case CK_FloatingComplexToIntegralComplex:
 1393 case CK_IntegralRealToComplex:
 1394 case CK_IntegralComplexToReal:
 1395 case CK_IntegralComplexToBoolean:
 1396 case CK_IntegralComplexCast:
 1397 case CK_IntegralComplexToFloatingComplex:
 1398 case CK_DerivedToBaseMemberPointer:
 1399 case CK_BaseToDerivedMemberPointer:
 1400 case CK_MemberPointerToBoolean:
 1401 case CK_ReinterpretMemberPointer:
 1402 case CK_AnyPointerToBlockPointerCast:
 1403 case CK_ARCProduceObject:
 1404 case CK_ARCConsumeObject:
 1405 case CK_ARCReclaimReturnedObject:
 1406 case CK_ARCExtendBlockObject:
 1407 case CK_CopyAndAutoreleaseBlockObject:
 1408 case CK_IntToOCLSampler:
 1409 case CK_FloatingToFixedPoint:
 1410 case CK_FixedPointToFloating:
 1411 case CK_FixedPointCast:
 1412 case CK_FixedPointToBoolean:
 1413 case CK_FixedPointToIntegral:
 1414 case CK_IntegralToFixedPoint:
 1415 case CK_MatrixCast:
 1416 case CK_HLSLVectorTruncation:
 1417 case CK_HLSLMatrixTruncation:
 1418 case CK_HLSLArrayRValue:
 1419 case CK_HLSLElementwiseCast:
 1420 case CK_HLSLAggregateSplatCast:
 1421 llvm_unreachable("unexpected cast lvalue");
 1422
 1423 case CK_Dependent:
 1424 llvm_unreachable("dependent cast kind in IR gen!");
 1425
 1426 case CK_BuiltinFnToFnPtr:
 1427 llvm_unreachable("builtin functions are handled elsewhere");
 1428
 1429 case CK_Dynamic: {
 1430 LValue lv = emitLValue(e->getSubExpr());
 1431 Address v = lv.getAddress();
 1432 const auto *dce = cast<CXXDynamicCastExpr>(e);
// NOTE(review): the dynamic-cast emission/return statement is on an elided
// line.
 1434 }
 1435
 1436 // These are never l-values; just use the aggregate emission code.
 1437 case CK_NonAtomicToAtomic:
 1438 case CK_AtomicToNonAtomic:
 1439 case CK_ToUnion:
 1440 case CK_ObjCObjectLValueCast:
 1441 case CK_VectorSplat:
 1442 case CK_ConstructorConversion:
 1443 case CK_UserDefinedConversion:
 1444 case CK_CPointerToObjCPointerCast:
 1445 case CK_BlockPointerToObjCPointerCast:
 1446 case CK_LValueToRValue: {
 1447 cgm.errorNYI(e->getSourceRange(),
 1448 std::string("emitCastLValue for unhandled cast kind: ") +
 1449 e->getCastKindName());
 1450
 1451 return {};
 1452 }
 1453 case CK_AddressSpaceConversion: {
 1454 LValue lv = emitLValue(e->getSubExpr());
 1455 QualType destTy = getContext().getPointerType(e->getType());
 1456
 1457 clang::LangAS srcLangAS = e->getSubExpr()->getType().getAddressSpace();
 1458 cir::TargetAddressSpaceAttr srcAS;
 1459 if (clang::isTargetAddressSpace(srcLangAS))
 1460 srcAS = cir::toCIRTargetAddressSpace(getMLIRContext(), srcLangAS);
 1461 else
 1462 cgm.errorNYI(
 1463 e->getSourceRange(),
 1464 "emitCastLValue: address space conversion from unknown address "
 1465 "space");
 1466
 1467 mlir::Value v = getTargetHooks().performAddrSpaceCast(
 1468 *this, lv.getPointer(), srcAS, convertType(destTy));
 1469
// NOTE(review): the start of this makeAddrLValue call is on an elided line.
 1471 lv.getAddress().getAlignment()),
 1472 e->getType(), lv.getBaseInfo());
 1473 }
 1474
 1475 case CK_LValueBitCast: {
 1476 // This must be a reinterpret_cast (or c-style equivalent).
 1477 const auto *ce = cast<ExplicitCastExpr>(e);
 1478
 1479 cgm.emitExplicitCastExprType(ce, this);
 1480 LValue LV = emitLValue(e->getSubExpr());
// NOTE(review): the start of the expression initializing `V` is on an elided
// line.
 1482 builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
 1483
 1484 return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
 1485 }
 1486
 1487 case CK_NoOp: {
 1488 // CK_NoOp can model a qualification conversion, which can remove an array
 1489 // bound and change the IR type.
 1490 LValue lv = emitLValue(e->getSubExpr());
 1491 // Propagate the volatile qualifier to LValue, if exists in e.
// NOTE(review): the condition guarding this errorNYI is on an elided line.
 1493 cgm.errorNYI(e->getSourceRange(),
 1494 "emitCastLValue: NoOp changes volatile qual");
 1495 if (lv.isSimple()) {
 1496 Address v = lv.getAddress();
 1497 if (v.isValid()) {
 1498 mlir::Type ty = convertTypeForMem(e->getType());
 1499 if (v.getElementType() != ty)
 1500 cgm.errorNYI(e->getSourceRange(),
 1501 "emitCastLValue: NoOp needs bitcast");
 1502 }
 1503 }
 1504 return lv;
 1505 }
 1506
 1507 case CK_UncheckedDerivedToBase:
 1508 case CK_DerivedToBase: {
 1509 auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();
 1510
 1511 LValue lv = emitLValue(e->getSubExpr());
 1512 Address thisAddr = lv.getAddress();
 1513
 1514 // Perform the derived-to-base conversion
 1515 Address baseAddr =
 1516 getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
 1517 /*NullCheckValue=*/false, e->getExprLoc());
 1518
 1519 // TODO: Support accesses to members of base classes in TBAA. For now, we
 1520 // conservatively pretend that the complete object is of the base class
 1521 // type.
 1523 return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
 1524 }
 1525
 1526 case CK_BaseToDerived: {
 1527 const auto *derivedClassDecl = e->getType()->castAsCXXRecordDecl();
 1528 LValue lv = emitLValue(e->getSubExpr());
 1529
 1530 // Perform the base-to-derived conversion
// NOTE(review): the declaration of `derived` (via getAddressOfDerivedClass,
// presumably) starts on an elided line.
 1532 getLoc(e->getSourceRange()), lv.getAddress(), derivedClassDecl,
 1533 e->path(), /*NullCheckValue=*/false);
 1534 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
 1535 // performed and the object is not of the derived type.
 1537
 1539 return makeAddrLValue(derived, e->getType(), lv.getBaseInfo());
 1540 }
 1541
 1542 case CK_ZeroToOCLOpaqueType:
 1543 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
 1544 }
 1545
 1546 llvm_unreachable("Invalid cast kind");
 1547}
1548
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the first signature line (presumably a static helper taking a
// CIRGenFunction &, likely named `cgf`, as used by the caller below) is
// elided. Code kept byte-identical; comments only.
// Purpose (from visible code): when a MemberExpr refers to a VarDecl (e.g. a
// static data member), rewrite it as an equivalent DeclRefExpr; otherwise
// return nullptr.
 1550 const MemberExpr *me) {
 1551 if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
 1552 // Try to emit static variable member expressions as DREs.
 1553 return DeclRefExpr::Create(
// NOTE(review): the first DeclRefExpr::Create arguments are on an elided line.
 1555 /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
 1556 me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
 1557 }
 1558 return nullptr;
 1559}
1560
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the signature line (presumably
// LValue CIRGenFunction::emitMemberExpr(const MemberExpr *e)) is elided.
// Code kept byte-identical; comments only.
// Purpose (from visible code): emit an LValue for a member access, first
// trying the DeclRefExpr shortcut for static members, then emitting the base
// (pointer or lvalue) and drilling into the field.
 1562 if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
 1564 return emitDeclRefLValue(dre);
 1565 }
 1566
 1567 Expr *baseExpr = e->getBase();
 1568 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
 1569 LValue baseLV;
 1570 if (e->isArrow()) {
 1571 LValueBaseInfo baseInfo;
 1573 Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
 1574 QualType ptrTy = baseExpr->getType()->getPointeeType();
 1576 baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
 1577 } else {
 1579 baseLV = emitLValue(baseExpr);
 1580 }
 1581
 1582 const NamedDecl *nd = e->getMemberDecl();
 1583 if (auto *field = dyn_cast<FieldDecl>(nd)) {
 1584 LValue lv = emitLValueForField(baseLV, field);
 1586 if (getLangOpts().OpenMP) {
 1587 // If the member was explicitly marked as nontemporal, mark it as
 1588 // nontemporal. If the base lvalue is marked as nontemporal, mark access
 1589 // to children as nontemporal too.
 1590 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
 1591 }
 1592 return lv;
 1593 }
 1594
 1595 if (isa<FunctionDecl>(nd)) {
 1596 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: FunctionDecl");
 1597 return LValue();
 1598 }
 1599
 1600 llvm_unreachable("Unhandled member declaration!");
 1601}
1602
 1603/// Evaluate an expression into a given memory location.
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the first signature line (presumably
// void CIRGenFunction::emitAnyExprToMem(const Expr *e, Address location, ...))
// is elided. Code kept byte-identical; comments only.
 1605 Qualifiers quals, bool isInit) {
 1606 // FIXME: This function should take an LValue as an argument.
// Dispatch on the CIR evaluation kind of the expression's type.
 1607 switch (getEvaluationKind(e->getType())) {
 1608 case cir::TEK_Complex: {
 1609 LValue lv = makeAddrLValue(location, e->getType());
 1610 emitComplexExprIntoLValue(e, lv, isInit);
 1611 return;
 1612 }
 1613
 1614 case cir::TEK_Aggregate: {
 1615 emitAggExpr(e, AggValueSlot::forAddr(location, quals,
// NOTE(review): the remaining AggValueSlot::forAddr arguments are on elided
// lines.
 1619 return;
 1620 }
 1621
 1622 case cir::TEK_Scalar: {
// NOTE(review): the declaration of `rv` is on an elided line.
 1624 LValue lv = makeAddrLValue(location, e->getType());
 1625 emitStoreThroughLValue(rv, lv);
 1626 return;
 1627 }
 1628 }
 1629
 1630 llvm_unreachable("bad evaluation kind");
 1631}
1632
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the first signature line (presumably a static helper returning Address and
// taking CIRGenFunction &cgf) is elided. Code kept byte-identical.
// Purpose (from visible code): allocate storage for a materialized temporary,
// placing the alloca next to the extending declaration's alloca when one
// exists; static/thread storage durations are not implemented yet.
 1634 const MaterializeTemporaryExpr *m,
 1635 const Expr *inner) {
 1636 // TODO(cir): cgf.getTargetHooks();
 1637 switch (m->getStorageDuration()) {
 1638 case SD_FullExpression:
 1639 case SD_Automatic: {
 1640 QualType ty = inner->getType();
 1641
 1643
 1644 // The temporary memory should be created in the same scope as the extending
 1645 // declaration of the temporary materialization expression.
 1646 cir::AllocaOp extDeclAlloca;
 1647 if (const ValueDecl *extDecl = m->getExtendingDecl()) {
 1648 auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
 1649 if (extDeclAddrIter != cgf.localDeclMap.end())
 1650 extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
 1651 }
// Insert the new temporary right at the extending decl's alloca (if found)
// so their lifetimes are scoped together.
 1652 mlir::OpBuilder::InsertPoint ip;
 1653 if (extDeclAlloca)
 1654 ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
 1655 return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
 1656 cgf.getCounterRefTmpAsString(), /*alloca=*/nullptr,
 1657 ip);
 1658 }
 1659 case SD_Thread:
 1660 case SD_Static: {
 1661 cgf.cgm.errorNYI(
 1662 m->getSourceRange(),
 1663 "createReferenceTemporary: static/thread storage duration");
 1664 return Address::invalid();
 1665 }
 1666
 1667 case SD_Dynamic:
 1668 llvm_unreachable("temporary can't have dynamic storage duration");
 1669 }
 1670 llvm_unreachable("unknown storage duration");
 1671}
1672
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the first signature line (presumably a static helper taking
// CIRGenFunction &cgf) is elided. Code kept byte-identical; comments only.
// Purpose (from visible code): register the destructor cleanup appropriate to
// the temporary's storage duration; only the full-expression path is
// implemented so far.
 1674 const MaterializeTemporaryExpr *m,
 1675 const Expr *e, Address referenceTemporary) {
 1676 // Objective-C++ ARC:
 1677 // If we are binding a reference to a temporary that has ownership, we
 1678 // need to perform retain/release operations on the temporary.
 1679 //
 1680 // FIXME(ogcg): This should be looking at e, not m.
 1681 if (m->getType().getObjCLifetime()) {
 1682 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
 1683 return;
 1684 }
 1685
// NOTE(review): the declaration of `dk` (a destruction-kind query, from its
// use below) is on an elided line.
 1687 if (dk == QualType::DK_none)
 1688 return;
 1689
 1690 switch (m->getStorageDuration()) {
 1691 case SD_Static:
 1692 case SD_Thread: {
 1693 CXXDestructorDecl *referenceTemporaryDtor = nullptr;
 1694 if (const auto *classDecl =
// NOTE(review): the record-decl lookup feeding this condition is on an elided
// line.
 1696 classDecl && !classDecl->hasTrivialDestructor())
 1697 // Get the destructor for the reference temporary.
 1698 referenceTemporaryDtor = classDecl->getDestructor();
 1699
 1700 if (!referenceTemporaryDtor)
 1701 return;
 1702
 1703 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
 1704 "storage duration with destructors");
 1705 break;
 1706 }
 1707
 1708 case SD_FullExpression:
 1709 cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
// NOTE(review): the trailing pushDestroy arguments are on an elided line.
 1711 break;
 1712
 1713 case SD_Automatic:
 1714 cgf.cgm.errorNYI(e->getSourceRange(),
 1715 "pushTemporaryCleanup: automatic storage duration");
 1716 break;
 1717
 1718 case SD_Dynamic:
 1719 llvm_unreachable("temporary cannot have dynamic storage duration");
 1720 }
 1721}
1722
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the first signature line (presumably
// LValue CIRGenFunction::emitMaterializeTemporaryExpr(...)) is elided.
// Code kept byte-identical; comments only.
// Purpose (from visible code): create, initialize, and register cleanups for
// a materialized temporary, then return an LValue for it.
 1724 const MaterializeTemporaryExpr *m) {
 1725 const Expr *e = m->getSubExpr();
 1726
 1727 assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
 1728 !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
 1729 "Reference should never be pseudo-strong!");
 1730
 1731 // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so
 1732 // as that will cause the lifetime adjustment to be lost for ARC
 1733 auto ownership = m->getType().getObjCLifetime();
 1734 if (ownership != Qualifiers::OCL_None &&
 1735 ownership != Qualifiers::OCL_ExplicitNone) {
 1736 cgm.errorNYI(e->getSourceRange(),
 1737 "emitMaterializeTemporaryExpr: ObjCLifetime");
 1738 return {};
 1739 }
 1740
// NOTE(review): the declarations of `commaLHSs` and `adjustments` are on
// elided lines.
 1743 e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);
 1744
// Evaluate any comma-LHS side effects that were skipped over.
 1745 for (const Expr *ignored : commaLHSs)
 1746 emitIgnoredExpr(ignored);
 1747
 1748 if (isa<OpaqueValueExpr>(e)) {
 1749 cgm.errorNYI(e->getSourceRange(),
 1750 "emitMaterializeTemporaryExpr: OpaqueValueExpr");
 1751 return {};
 1752 }
 1753
 1754 // Create and initialize the reference temporary.
 1755 Address object = createReferenceTemporary(*this, m, e);
 1756
 1757 if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
 1758 // TODO(cir): add something akin to stripPointerCasts() to ptr above
 1759 cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
 1760 return {};
 1761 } else {
 1763 emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
 1764 }
 1765 pushTemporaryCleanup(*this, m, e, object);
 1766
 1767 // Perform derived-to-base casts and/or field accesses, to get from the
 1768 // temporary object we created (and, potentially, for which we extended
 1769 // the lifetime) to the subobject we're binding the reference to.
 1770 if (!adjustments.empty()) {
 1771 cgm.errorNYI(e->getSourceRange(),
 1772 "emitMaterializeTemporaryExpr: Adjustments");
 1773 return {};
 1774 }
 1775
 1776 return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
 1777}
1778
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the line carrying the function name (an OpaqueValueExpr LValue-mapping
// accessor, from the map and assert below) is elided. Code kept
// byte-identical; comments only.
// Purpose (from visible code): return the cached LValue for `e` if one was
// recorded; otherwise `e` must be a unique OVE, which is emitted directly
// from its source expression.
 1779LValue
 1782
 1783 auto it = opaqueLValues.find(e);
 1784 if (it != opaqueLValues.end())
 1785 return it->second;
 1786
 1787 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
 1788 return emitLValue(e->getSourceExpr());
 1789}
1790
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the line carrying the function name (the RValue counterpart of the mapping
// accessor above, from the map and assert below) is elided. Code kept
// byte-identical; comments only.
// Purpose (from visible code): return the cached RValue for `e` if one was
// recorded; otherwise `e` must be a unique OVE, which is evaluated directly
// from its source expression.
 1791RValue
 1794
 1795 auto it = opaqueRValues.find(e);
 1796 if (it != opaqueRValues.end())
 1797 return it->second;
 1798
 1799 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
 1800 return emitAnyExpr(e->getSourceExpr());
 1801}
1802
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the signature line (presumably
// LValue CIRGenFunction::emitCompoundLiteralLValue(const CompoundLiteralExpr
// *e)) is elided. Code kept byte-identical; comments only.
// Purpose (from visible code): allocate a temporary for a block-scope
// compound literal, initialize it from the literal's initializer, and return
// the LValue; file-scope literals and C destructor cleanups are NYI.
 1804 if (e->isFileScope()) {
 1805 cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
 1806 return {};
 1807 }
 1808
 1809 if (e->getType()->isVariablyModifiedType())
// NOTE(review): the statement body of this condition is on an elided line.
 1811
 1812 Address declPtr = createMemTemp(e->getType(), getLoc(e->getSourceRange()),
 1813 ".compoundliteral");
 1814 const Expr *initExpr = e->getInitializer();
 1815 LValue result = makeAddrLValue(declPtr, e->getType(), AlignmentSource::Decl);
 1816
 1817 emitAnyExprToMem(initExpr, declPtr, e->getType().getQualifiers(),
 1818 /*Init*/ true);
 1819
 1820 // Block-scope compound literals are destroyed at the end of the enclosing
 1821 // scope in C.
 1822 if (!getLangOpts().CPlusPlus && e->getType().isDestructedType()) {
 1823 cgm.errorNYI(e->getSourceRange(),
 1824 "emitCompoundLiteralLValue: non C++ DestructedType");
 1825 return {};
 1826 }
 1827
 1828 return result;
 1829}
1830
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the signature line (presumably
// LValue CIRGenFunction::emitCallExprLValue(const CallExpr *e)) is elided.
// Code kept byte-identical; comments only.
// Purpose (from visible code): emit a call whose result is used as an lvalue;
// only scalar (reference-returning) calls are handled so far.
 1832 RValue rv = emitCallExpr(e);
 1833
 1834 if (!rv.isScalar()) {
 1835 cgm.errorNYI(e->getSourceRange(), "emitCallExprLValue: non-scalar return");
 1836 return {};
 1837 }
 1838
 1839 assert(e->getCallReturnType(getContext())->isReferenceType() &&
 1840 "Can't have a scalar return unless the return type is a "
 1841 "reference type!");
 1842
// NOTE(review): the final makeAddrLValue/return statement is on an elided
// line.
 1844}
1845
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the signature line (presumably
// LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e))
// is elided. Code kept byte-identical; comments only.
// Purpose (from visible code): emit an lvalue for comma, pointer-to-member,
// and assignment binary operators; only scalar assignment is fully handled.
 1847 // Comma expressions just emit their LHS then their RHS as an l-value.
 1848 if (e->getOpcode() == BO_Comma) {
 1849 emitIgnoredExpr(e->getLHS());
 1850 return emitLValue(e->getRHS());
 1851 }
 1852
 1853 if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI)
// NOTE(review): the pointer-to-member handling statement is on an elided line.
 1855
 1856 assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");
 1857
 1858 // Note that in all of these cases, __block variables need the RHS
 1859 // evaluated first just in case the variable gets moved by the RHS.
 1860
// NOTE(review): the switch header (an evaluation-kind dispatch, from the TEK_
// cases below) is on an elided line.
 1862 case cir::TEK_Scalar: {
// NOTE(review): the first operand of this ObjC-lifetime comparison's
// right-hand side is on an elided line.
 1864 if (e->getLHS()->getType().getObjCLifetime() !=
 1866 cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
 1867 return {};
 1868 }
 1869
// RHS is evaluated before the LHS lvalue (see __block note above).
 1870 RValue rv = emitAnyExpr(e->getRHS());
 1871 LValue lv = emitLValue(e->getLHS());
 1872
 1873 SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
 1874 if (lv.isBitField())
// NOTE(review): the bitfield store statement is on an elided line.
 1876 else
 1877 emitStoreThroughLValue(rv, lv);
 1878
 1879 if (getLangOpts().OpenMP) {
 1880 cgm.errorNYI(e->getSourceRange(), "openmp");
 1881 return {};
 1882 }
 1883
 1884 return lv;
 1885 }
 1886
 1887 case cir::TEK_Complex: {
// NOTE(review): the complex-assignment emission is on an elided line.
 1889 }
 1890
 1891 case cir::TEK_Aggregate:
 1892 cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
 1893 return {};
 1894 }
 1895 llvm_unreachable("bad evaluation kind");
 1896}
1897
 1898/// Emit code to compute the specified expression which
 1899/// can have any type. The result is returned as an RValue struct.
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the first signature line (presumably
// RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot,
// ...)) is elided. Code kept byte-identical; comments only.
 1901 bool ignoreResult) {
// NOTE(review): the switch header (an evaluation-kind dispatch, from the TEK_
// cases below) is on an elided line.
 1903 case cir::TEK_Scalar:
 1904 return RValue::get(emitScalarExpr(e, ignoreResult));
 1905 case cir::TEK_Complex:
// NOTE(review): the complex-result return statement is on an elided line.
 1907 case cir::TEK_Aggregate: {
// Materialize a temporary slot when the caller did not provide one and the
// result is actually needed.
 1908 if (!ignoreResult && aggSlot.isIgnored())
 1909 aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
 1911 emitAggExpr(e, aggSlot);
 1912 return aggSlot.asRValue();
 1913 }
 1914 }
 1915 llvm_unreachable("bad evaluation kind");
 1916}
1917
 1918// Detect the unusual situation where an inline version is shadowed by a
 1919// non-inline version. In that case we should pick the external one
 1920// everywhere. That's GCC behavior too.
// NOTE(review): doxygen-listing fragment — leading numerals are artifacts and
// the signature line (presumably
// static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd)) is
// elided. Code kept byte-identical; comments only.
// Returns true only when every redeclaration of `fd` is an inline builtin
// declaration.
 1922 for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
 1923 if (!pd->isInlineBuiltinDeclaration())
 1924 return false;
 1925 return true;
 1926}
1927
// NOTE(review): doxygen-listing fragment — leading numerals are line-number
// artifacts and at least one internal line (after the hasAttributeNoBuiltin
// initialization) is elided. Code kept byte-identical; comments only.
// Purpose (from visible code): build the CIRGenCallee for a direct call,
// with special handling for builtins and for inline builtin implementations
// (which are called through a mangled ".inline" clone).
 1928CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
 1929 const auto *fd = cast<FunctionDecl>(gd.getDecl());
 1930
 1931 if (unsigned builtinID = fd->getBuiltinID()) {
 1932 StringRef ident = cgm.getMangledName(gd);
 1933 std::string fdInlineName = (ident + ".inline").str();
 1934
 1935 bool isPredefinedLibFunction =
 1936 cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
 1937 // Assume nobuiltins everywhere until we actually read the attributes.
 1938 bool hasAttributeNoBuiltin = true;
 1940
 1941 // When directly calling an inline builtin, call it through its mangled
 1942 // name to make it clear it's not the actual builtin.
 1943 auto fn = cast<cir::FuncOp>(curFn);
 1944 if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
 1945 cir::FuncOp clone =
 1946 mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
 1947
 1948 if (!clone) {
 1949 // Create a forward declaration - the body will be generated in
 1950 // generateCode when the function definition is processed
 1951 cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
 1952 mlir::OpBuilder::InsertionGuard guard(builder);
 1953 builder.setInsertionPointToStart(cgm.getModule().getBody());
 1954
 1955 clone = cir::FuncOp::create(builder, calleeFunc.getLoc(), fdInlineName,
 1956 calleeFunc.getFunctionType());
 1957 clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
 1958 &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
 1959 clone.setSymVisibility("private");
 1960 clone.setInlineKind(cir::InlineKind::AlwaysInline);
 1961 }
 1962 return CIRGenCallee::forDirect(clone, gd);
 1963 }
 1964
 1965 // Replaceable builtins provide their own implementation of a builtin. If we
 1966 // are in an inline builtin implementation, avoid trivial infinite
 1967 // recursion. Honor __attribute__((no_builtin("foo"))) or
 1968 // __attribute__((no_builtin)) on the current function unless foo is
 1969 // not a predefined library function which means we must generate the
 1970 // builtin no matter what.
 1971 else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
 1972 return CIRGenCallee::forBuiltin(builtinID, fd);
 1973 }
 1974
 1975 cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);
 1976
 1977 assert(!cir::MissingFeatures::hip());
 1978
 1979 return CIRGenCallee::forDirect(callee, gd);
 1980}
1981
  // Void "values" are represented as a null RValue; all other types are
  // not implemented yet and fall back to null after reporting.
  if (ty->isVoidType())
    return RValue::get(nullptr);

  cgm.errorNYI("unsupported type for undef rvalue");
  return RValue::get(nullptr);
}
1989
                      const CIRGenCallee &origCallee,
                      const clang::CallExpr *e,
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(calleeTy->isFunctionPointerType() &&
         "Callee must have function pointer type!");

  calleeTy = getContext().getCanonicalType(calleeTy);
  auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();

  CIRGenCallee callee = origCallee;

  if (getLangOpts().CPlusPlus)

  const auto *fnType = cast<FunctionType>(pointeeTy);

  // Evaluate the call arguments first (possibly against the prototype, if
  // one exists), then arrange the call per the target-independent ABI info.
  CallArgList args;

  emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
               e->getDirectCallee());

  const CIRGenFunctionInfo &funcInfo =
      cgm.getTypes().arrangeFreeFunctionCall(args, fnType);

  // C99 6.5.2.2p6:
  // If the expression that denotes the called function has a type that does
  // not include a prototype, [the default argument promotions are performed].
  // If the number of arguments does not equal the number of parameters, the
  // behavior is undefined. If the function is defined with a type that
  // includes a prototype, and either the prototype ends with an ellipsis (,
  // ...) or the types of the arguments after promotion are not compatible
  // with the types of the parameters, the behavior is undefined. If the
  // function is defined with a type that does not include a prototype, and
  // the types of the arguments after promotion are not compatible with those
  // of the parameters after promotion, the behavior is undefined [except in
  // some trivial cases].
  // That is, in the general case, we should assume that a call through an
  // unprototyped function type works like a *non-variadic* call. The way we
  // make this work is to cast to the exact type of the promoted arguments.
  if (isa<FunctionNoProtoType>(fnType)) {
    cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
    // get non-variadic function type
    calleeTy = cir::FuncType::get(calleeTy.getInputs(),
                                  calleeTy.getReturnType(), false);
    auto calleePtrTy = cir::PointerType::get(calleeTy);

    // The callee may be a direct cir.func (take its address via
    // cir.get_global) or an indirect SSA value.
    mlir::Operation *fn = callee.getFunctionPointer();
    mlir::Value addr;
    if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
      addr = cir::GetGlobalOp::create(
          builder, getLoc(e->getSourceRange()),
          cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
    } else {
      addr = fn->getResult(0);
    }

    // Bitcast the function pointer to the exact promoted (non-variadic) type.
    fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
    callee.setFunctionPointer(fn);
  }

  assert(!cir::MissingFeatures::hip());

  cir::CIRCallOpInterface callOp;
  RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
                               getLoc(e->getExprLoc()));

  return callResult;
}
2070
  // Classify the callee expression: strip parens, look through decay casts,
  // resolve direct callees (DeclRefExpr / MemberExpr to functions), and fall
  // back to an indirect function-pointer callee.
  e = e->IgnoreParens();

  // Look through function-to-pointer decay.
  if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
    if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
        implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
      return emitCallee(implicitCast->getSubExpr());
    }
    // When performing an indirect call through a function pointer lvalue, the
    // function pointer lvalue is implicitly converted to an rvalue through an
    // lvalue-to-rvalue conversion.
    assert(implicitCast->getCastKind() == CK_LValueToRValue &&
           "unexpected implicit cast on function pointers");
  } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
    // Resolve direct calls.
    const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
    return emitDirectCallee(funcDecl);
  } else if (auto me = dyn_cast<MemberExpr>(e)) {
    if (const auto *fd = dyn_cast<FunctionDecl>(me->getMemberDecl())) {
      // The base is evaluated only for its side effects.
      emitIgnoredExpr(me->getBase());
      return emitDirectCallee(fd);
    }
    // Else fall through to the indirect reference handling below.
  } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
  }

  // Otherwise, we have an indirect reference.
  mlir::Value calleePtr;
  if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
    calleePtr = emitScalarExpr(e);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = e->getType();
    calleePtr = emitLValue(e).getPointer();
  }
  assert(functionType->isFunctionType());

  // Record the referenced variable (if any) so the callee info can carry it.
  GlobalDecl gd;
  if (const auto *vd =
          dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
    gd = GlobalDecl(vd);

  CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
  CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
  return callee;
}
2120

  // Special-case the call-expression flavors that need dedicated lowering
  // (member calls, CUDA kernel launches, operator calls) before falling back
  // to the generic callee-resolution path.
  if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))

  if (isa<CUDAKernelCallExpr>(e)) {
    cgm.errorNYI(e->getSourceRange(), "call to CUDA kernel");
    return RValue::get(nullptr);
  }

  if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
    // If the callee decl is a CXXMethodDecl, we need to emit this as a C++
    // operator member call.
    if (const CXXMethodDecl *md =
            dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
      return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
    // A CXXOperatorCallExpr is created even for explicit object methods, but
    // these should be treated like static function calls. Fall through to do
    // that.
  }

  CIRGenCallee callee = emitCallee(e->getCallee());

  // Builtins and pseudo-destructors have their own emission paths.
  if (callee.isBuiltin())
    return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
                           returnValue);

  if (callee.isPseudoDestructor())

  return emitCall(e->getCallee()->getType(), callee, e, returnValue);
}
2155
/// Emit code to compute the specified expression, ignoring the result.
  // PRValues can be emitted directly with the result discarded; glvalues
  // still need their side effects, so emit as an lvalue and drop it.
  if (e->isPRValue()) {
    emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
    return;
  }

  // Just emit it as an l-value and drop the result.
  emitLValue(e);
}
2166
                          LValueBaseInfo *baseInfo) {
  assert(e->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue lv = emitLValue(e);
  Address addr = lv.getAddress();

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());

  // VLAs decay to the address as-is; the element computation below only
  // applies to constant-size arrays.
  if (e->getType()->isVariableArrayType())
    return addr;

  [[maybe_unused]] auto pointeeTy =
      mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());

  [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
  assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
  assert(pointeeTy == arrayTy);

  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent accesses
  // to the pointee object as if it had no base lvalue specified.
  // TODO: Support TBAA for member arrays.

  mlir::Value ptr = builder.maybeBuildArrayDecay(
      cgm.getLoc(e->getSourceRange()), addr.getPointer(),
      convertTypeForMem(eltType));
  return Address(ptr, addr.getAlignment());
}
2204
/// Given the address of a temporary variable, produce an r-value of its type.
/// Loads scalars and complex values from memory; aggregates are returned by
/// address without a load.
  switch (getEvaluationKind(type)) {
  case cir::TEK_Complex:
    return RValue::getComplex(emitLoadOfComplex(lvalue, loc));
  case cir::TEK_Aggregate:
    return lvalue.asAggregateRValue();
  case cir::TEK_Scalar:
    return RValue::get(emitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}
2219
/// Emit an `if` on a boolean condition, filling `then` and `else` into
/// appropriated regions.
/// \returns success only if both the then- and else-statement emission
/// succeeded.
mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
                                                     const Stmt *thenS,
                                                     const Stmt *elseS) {
  // Compute locations up front; elseLoc is only valid when elseS exists.
  mlir::Location thenLoc = getLoc(thenS->getSourceRange());
  std::optional<mlir::Location> elseLoc;
  if (elseS)
    elseLoc = getLoc(elseS->getSourceRange());

  mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
      cond, /*thenBuilder=*/
      [&](mlir::OpBuilder &, mlir::Location) {
        // Each branch gets its own lexical scope for cleanups.
        LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
        resThen = emitStmt(thenS, /*useCurrentScope=*/true);
      },
      thenLoc,
      /*elseBuilder=*/
      [&](mlir::OpBuilder &, mlir::Location) {
        assert(elseLoc && "Invalid location for elseS.");
        LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
        resElse = emitStmt(elseS, /*useCurrentScope=*/true);
      },
      elseLoc);

  return mlir::LogicalResult::success(resThen.succeeded() &&
                                      resElse.succeeded());
}
2249
/// Emit an `if` on a boolean condition, filling `then` and `else` into
/// appropriated regions.
/// Builder-callback overload: the caller supplies the region bodies.
    const clang::Expr *cond, BuilderCallbackRef thenBuilder,
    mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
    std::optional<mlir::Location> elseLoc) {
  // Attempt to be as accurate as possible with IfOp location, generate
  // one fused location that has either 2 or 4 total locations, depending
  // on else's availability.
  SmallVector<mlir::Location, 2> ifLocs{thenLoc};
  if (elseLoc)
    ifLocs.push_back(*elseLoc);
  mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);

  // Emit the code with the fully general case.
  mlir::Value condV = emitOpOnBoolExpr(loc, cond);
  return cir::IfOp::create(builder, loc, condV, elseLoc.has_value(),
                           /*thenBuilder=*/thenBuilder,
                           /*elseBuilder=*/elseBuilder);
}
2270
/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
/// Lower \p cond to a boolean value, special-casing the conditional operator
/// (emitted as a cir.ternary) and throw expressions.
mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
                                             const Expr *cond) {
  cond = cond->IgnoreParens();

  // In LLVM the condition is reversed here for efficient codegen.
  // This should be done in CIR prior to LLVM lowering, if we do now
  // we can make CIR based diagnostics misleading.
  // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)

  if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
    Expr *trueExpr = condOp->getTrueExpr();
    Expr *falseExpr = condOp->getFalseExpr();
    mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());

    // Each arm yields its scalar value; the ternary's result is then
    // converted to bool below.
    mlir::Value ternaryOpRes =
        cir::TernaryOp::create(
            builder, loc, condV, /*thenBuilder=*/
            [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
              mlir::Value lhs = emitScalarExpr(trueExpr);
              cir::YieldOp::create(b, loc, lhs);
            },
            /*elseBuilder=*/
            [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
              mlir::Value rhs = emitScalarExpr(falseExpr);
              cir::YieldOp::create(b, loc, rhs);
            })
            .getResult();

    return emitScalarConversion(ternaryOpRes, condOp->getType(),
                                getContext().BoolTy, condOp->getExprLoc());
  }

  if (isa<CXXThrowExpr>(cond)) {
    cgm.errorNYI("NYI");
    return createDummyValue(loc, cond->getType());
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.

  // Emit the code with the fully general case.
  return evaluateExprAsBool(cond);
}
2320
/// Emit an alloca for \p name/\p ty, choosing the entry block of either the
/// function or the current lexical scope, then delegating to the
/// insertion-point overload.
mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
                                       mlir::Location loc, CharUnits alignment,
                                       bool insertIntoFnEntryBlock,
                                       mlir::Value arraySize) {
  mlir::Block *entryBlock = insertIntoFnEntryBlock
                                : curLexScope->getEntryBlock();

  // If this is an alloca in the entry basic block of a cir.try and there's
  // a surrounding cir.scope, make sure the alloca ends up in the surrounding
  // scope instead. This is necessary in order to guarantee all SSA values are
  // reachable during cleanups.
  if (auto tryOp =
          llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
    if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
      entryBlock = &scopeOp.getScopeRegion().front();
  }

  return emitAlloca(name, ty, loc, alignment,
                    builder.getBestAllocaInsertPoint(entryBlock), arraySize);
}
2342
/// Emit a cir.alloca at the given insertion point, restoring the builder's
/// previous position afterwards.
mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
                                       mlir::Location loc, CharUnits alignment,
                                       mlir::OpBuilder::InsertPoint ip,
                                       mlir::Value arraySize) {
  // CIR uses its own alloca address space rather than follow the target data
  // layout like original CodeGen. The data layout awareness should be done in
  // the lowering pass instead.
  cir::PointerType localVarPtrTy =
  mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);

  mlir::Value addr;
  {
    // Scope the insertion-point change so the builder is restored on exit.
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.restoreInsertionPoint(ip);
    addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
                                /*var type*/ ty, name, alignIntAttr, arraySize);
  }
  return addr;
}
2364
// Note: this function also emit constructor calls to support an MSVC extension
// allowing explicit constructor function call.
  const Expr *callee = ce->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))

  // Otherwise the callee must be a member expression naming a method.
  const auto *me = cast<MemberExpr>(callee);
  const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());

  if (md->isStatic()) {
    cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
    return RValue::get(nullptr);
  }

  // Gather the pieces of the member access and hand off to the common
  // member-call emission path.
  bool hasQualifier = me->hasQualifier();
  NestedNameSpecifier qualifier = me->getQualifier();
  bool isArrow = me->isArrow();
  const Expr *base = me->getBase();

      ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
}
2390
  // Emit the expression as an lvalue.
  LValue lv = emitLValue(e);
  // Only simple (address-backed) lvalues are handled here.
  assert(lv.isSimple());
  // The r-value produced is the lvalue's pointer itself.
  mlir::Value value = lv.getPointer();

  return RValue::get(value);
}
2401
                            LValueBaseInfo *pointeeBaseInfo) {
  if (refLVal.isVolatile())
    cgm.errorNYI(loc, "load of volatile reference");

  // Load the referent pointer stored in the reference's storage.
  cir::LoadOp load =
      cir::LoadOp::create(builder, loc, refLVal.getAddress().getElementType(),
                          refLVal.getAddress().getPointer());

  // Build the pointee address with the natural alignment of the pointee type;
  // pointeeBaseInfo (if provided) is filled in by getNaturalTypeAlignment.
  QualType pointeeType = refLVal.getType()->getPointeeType();
  CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
  return Address(load, convertTypeForMem(pointeeType), align);
}
2417
                           mlir::Location loc,
                           QualType refTy,
                           AlignmentSource source) {
  // Wrap the reference storage as an lvalue, load the referent, and build an
  // lvalue for the pointee with the base info produced by the load.
  LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
  LValueBaseInfo pointeeBaseInfo;
  Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
  return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
                        pointeeBaseInfo);
}
2429
2430void CIRGenFunction::emitTrap(mlir::Location loc, bool createNewBlock) {
2431 cir::TrapOp::create(builder, loc);
2432 if (createNewBlock)
2433 builder.createBlock(builder.getBlock()->getParent());
2434}
2435
                     bool createNewBlock) {
  // Emit cir.unreachable; optionally open a fresh block so subsequent
  // emission does not land after the terminator.
  cir::UnreachableOp::create(builder, getLoc(loc));
  if (createNewBlock)
    builder.createBlock(builder.getBlock()->getParent());
}
2443
2444mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
2445 clang::QualType qt) {
2446 mlir::Type t = convertType(qt);
2447 CharUnits alignment = getContext().getTypeAlignInChars(qt);
2448 return builder.createDummyValue(loc, t, alignment);
2449}
2450
2451//===----------------------------------------------------------------------===//
2452// CIR builder helpers
2453//===----------------------------------------------------------------------===//
2454
                    const Twine &name, Address *alloca,
                    mlir::OpBuilder::InsertPoint ip) {
  // FIXME: Should we prefer the preferred type alignment here?
  // Convenience overload: derive the alignment from the AST type and forward.
  return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
                       alloca, ip);
}
2462
                    mlir::Location loc, const Twine &name,
                    Address *alloca,
                    mlir::OpBuilder::InsertPoint ip) {
  // Allocate a temporary of the memory representation of `ty` at the given
  // alignment. Constant matrix types need extra handling (NYI).
  Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
                                    /*ArraySize=*/nullptr, alloca, ip);
  if (ty->isConstantMatrixType()) {
    cgm.errorNYI(loc, "temporary matrix value");
  }
  return result;
}
2475
/// This creates an alloca and inserts it into the entry block of the
/// current region.
    mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
    mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
  // Use the caller-provided insertion point when one is set; otherwise the
  // entry-block variant picks the position.
  cir::AllocaOp alloca = ip.isSet()
                             ? createTempAlloca(ty, loc, name, ip, arraySize)
                             : createTempAlloca(ty, loc, name, arraySize);
  alloca.setAlignmentAttr(cgm.getSize(align));
  return Address(alloca, ty, align);
}
2487
/// This creates an alloca and inserts it into the entry block. The alloca is
/// casted to default address space if necessary.
// TODO(cir): Implement address space casting to match classic codegen's
// CreateTempAlloca behavior with DestLangAS parameter
                    mlir::Location loc, const Twine &name,
                    mlir::Value arraySize,
                    Address *allocaAddr,
                    mlir::OpBuilder::InsertPoint ip) {
  Address alloca =
      createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
  // Report the raw (uncast) alloca to the caller when requested.
  if (allocaAddr)
    *allocaAddr = alloca;
  mlir::Value v = alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.

  LangAS allocaAS = alloca.getAddressSpace()
                    alloca.getAddressSpace().getValue().getUInt())
                   getCIRAllocaAddressSpace().getValue().getUInt());
  }

  // Only insert a cast when the destination address space differs.
  if (dstTyAS != allocaAS) {
                        builder.getPointerTo(ty, dstTyAS));
  }
  return Address(v, ty, align);
}
2523
2524/// This creates an alloca and inserts it into the entry block if \p ArraySize
2525/// is nullptr, otherwise inserts it at the current insertion point of the
2526/// builder.
2527cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2528 mlir::Location loc,
2529 const Twine &name,
2530 mlir::Value arraySize,
2531 bool insertIntoFnEntryBlock) {
2532 return mlir::cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
2533 insertIntoFnEntryBlock, arraySize)
2534 .getDefiningOp());
2535}
2536
2537/// This creates an alloca and inserts it into the provided insertion point
2538cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2539 mlir::Location loc,
2540 const Twine &name,
2541 mlir::OpBuilder::InsertPoint ip,
2542 mlir::Value arraySize) {
2543 assert(ip.isSet() && "Insertion point is not set");
2544 return mlir::cast<cir::AllocaOp>(
2545 emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
2546 .getDefiningOp());
2547}
2548
/// Try to emit a reference to the given value without producing it as
/// an l-value. For many cases, this is just an optimization, but it avoids
/// us needing to emit global copies of variables if they're named without
/// triggering a formal use in a context where we can't emit a direct
/// reference to them, for instance if a block or lambda or a member of a
/// local class uses a const int variable or constexpr variable from an
/// enclosing function.
///
/// For named members of enums, this is the only way they are emitted.
  const ValueDecl *value = refExpr->getDecl();

  // There is a lot more to do here, but for now only EnumConstantDecl is
  // supported.

  // The value needs to be an enum constant or a constant variable.
  if (!isa<EnumConstantDecl>(value))
    return ConstantEmission();

  // Constant-evaluate the reference; bail out if that fails.
  Expr::EvalResult result;
  if (!refExpr->EvaluateAsRValue(result, getContext()))
    return ConstantEmission();

  QualType resultType = refExpr->getType();

  // As long as we're only handling EnumConstantDecl, there should be no
  // side-effects.
  assert(!result.HasSideEffects);

  // Emit as a constant.
  // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
  // somewhat heavy refactoring...)
  mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
      refExpr->getLocation(), result.Val, resultType);
  mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
  assert(cstToEmit && "expected a typed attribute");

  return ConstantEmission::forValue(cstToEmit);
}
2592
    // NOTE(review): the enclosing condition (not visible in this view)
    // presumably converts the member expression to a DeclRefExpr 'dre' —
    // confirm against the full source.
    return tryEmitAsConstant(dre);
  return ConstantEmission();
}
2599
    const CIRGenFunction::ConstantEmission &constant, Expr *e) {
  assert(constant && "not a constant");
  // Reference-form constant emissions are not supported yet.
  if (constant.isReference()) {
    cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
    return {};
  }
  // Materialize the typed attribute as a cir.const at the expression's loc.
  return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
}
2609
  // Lower a PredefinedExpr (e.g. __func__) to a string-literal lvalue whose
  // global is named after the enclosing function.
  const StringLiteral *sl = e->getFunctionName();
  assert(sl != nullptr && "No StringLiteral name in PredefinedExpr");
  auto fn = cast<cir::FuncOp>(curFn);
  StringRef fnName = fn.getName();
  // Strip the Windows '\01' mangling escape prefix if present.
  fnName.consume_front("\01");
  std::array<StringRef, 2> nameItems = {
  std::string gvName = llvm::join(nameItems, ".");
  if (isa_and_nonnull<BlockDecl>(curCodeDecl))
    cgm.errorNYI(e->getSourceRange(), "predefined lvalue in block");

  return emitStringLiteralLValue(sl, gvName);
}
2624
2629
namespace {
// Handle the case where the condition is a constant evaluatable simple integer,
// which means we don't have to separately handle the true/false blocks.
// Returns std::nullopt when the condition does not constant-fold or the dead
// branch contains a label (and so cannot be discarded).
std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
  const Expr *condExpr = e->getCond();
  llvm::APSInt condExprVal;
  if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
    return std::nullopt;

  // Pick the live arm according to the folded condition.
  const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
  if (!condExprVal.getBoolValue())
    std::swap(live, dead);

  if (cgf.containsLabel(dead))
    return std::nullopt;

  // If the true case is live, we need to track its region.
  // If a throw expression we emit it and return an undefined lvalue
  // because it can't be used.
  if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
    cgf.emitCXXThrowExpr(throwExpr);
    // Return an undefined lvalue - the throw terminates execution
    // so this value will never actually be used
    mlir::Type elemTy = cgf.convertType(dead->getType());
    mlir::Value undefPtr =
        cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
                                    cgf.getLoc(throwExpr->getSourceRange()));
    return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
                              dead->getType());
  }
  return cgf.emitLValue(live);
}

/// Emit the operand of a glvalue conditional operator. This is either a glvalue
/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
/// LValue is returned and the current block has been terminated.
static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
                                                         const Expr *operand) {
  if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
    cgf.emitCXXThrowExpr(throwExpr);
    return std::nullopt;
  }

  return cgf.emitLValue(operand);
}
} // namespace
2679
// Create and generate the 3 blocks for a conditional operator.
// Leaves the 'current block' in the continuation basic block.
// `branchGenFunc` emits each arm and returns an optional LValue; arms that
// terminate (e.g. via throw) yield no value and are patched afterwards.
template <typename FuncTy>
                                  const FuncTy &branchGenFunc) {
  ConditionalInfo info;
  ConditionalEvaluation eval(*this);
  mlir::Location loc = getLoc(e->getSourceRange());
  CIRGenBuilderTy &builder = getBuilder();

  mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
  mlir::Type yieldTy{};

  // Shared emission logic for the true/false arms.
  auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
                        const Expr *expr, std::optional<LValue> &resultLV) {
    CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
    curLexScope->setAsTernary();

    eval.beginEvaluation();
    resultLV = branchGenFunc(*this, expr);
    mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
    eval.endEvaluation();

    if (resultPtr) {
      yieldTy = resultPtr.getType();
      cir::YieldOp::create(b, loc, resultPtr);
    } else {
      // If LHS or RHS is a void expression we need
      // to patch arms as to properly match yield types.
      // If the current block's terminator is an UnreachableOp (from a throw),
      // we don't need a yield
      if (builder.getInsertionBlock()->mightHaveTerminator()) {
        mlir::Operation *terminator =
            builder.getInsertionBlock()->getTerminator();
        if (isa_and_nonnull<cir::UnreachableOp>(terminator))
          insertPoints.push_back(b.saveInsertionPoint());
      }
    }
  };

  info.result = cir::TernaryOp::create(
                    builder, loc, condV,
                    /*trueBuilder=*/
                    [&](mlir::OpBuilder &b, mlir::Location loc) {
                      emitBranch(b, loc, e->getTrueExpr(), info.lhs);
                    },
                    /*falseBuilder=*/
                    [&](mlir::OpBuilder &b, mlir::Location loc) {
                      emitBranch(b, loc, e->getFalseExpr(), info.rhs);
                    })
                    .getResult();

  // If both arms are void, so be it.
  if (!yieldTy)
    yieldTy = voidTy;

  // Insert required yields.
  for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.restoreInsertionPoint(toInsert);

    // Block does not return: build empty yield.
    if (!yieldTy) {
      cir::YieldOp::create(builder, loc);
    } else { // Block returns: set null yield value.
      mlir::Value op0 = builder.getNullValue(yieldTy, loc);
      cir::YieldOp::create(builder, loc, op0);
    }
  }

  return info;
}
2755
  // Non-glvalue conditionals must be aggregates; emit them into a temporary.
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(expr->getType()) &&
           "Unexpected conditional operator!");
    return emitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);
  // Fast path: constant-foldable condition needs only the live arm.
  if (std::optional<LValue> res =
          handleConditionalOperatorLValueSimpleCase(*this, expr))
    return *res;

  ConditionalInfo info =
      emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
        return emitLValueOrThrowExpression(cgf, e);
      });

  if ((info.lhs && !info.lhs->isSimple()) ||
      (info.rhs && !info.rhs->isSimple())) {
    cgm.errorNYI(expr->getSourceRange(),
                 "unsupported conditional operator with non-simple lvalue");
    return LValue();
  }

  if (info.lhs && info.rhs) {
    // Merge the two arms: conservative alignment (min) and alignment
    // source (max).
    Address lhsAddr = info.lhs->getAddress();
    Address rhsAddr = info.rhs->getAddress();
    Address result(info.result, lhsAddr.getElementType(),
                   std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
    AlignmentSource alignSource =
        std::max(info.lhs->getBaseInfo().getAlignmentSource(),
                 info.rhs->getBaseInfo().getAlignmentSource());
    return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
  }

  // One arm was a throw; the surviving arm provides the result.
  assert((info.lhs || info.rhs) &&
         "both operands of glvalue conditional are throw-expressions?");
  return info.lhs ? *info.lhs : *info.rhs;
}
2798
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// performing such an operation can be performed without a libcall.
  if (!cgm.getLangOpts().MSVolatile)
    return false;

  // /volatile:ms handling is not implemented yet; report and decline.
  cgm.errorNYI("LValueSuitableForInlineAtomic LangOpts MSVolatile");
  return false;
}
#define V(N, I)
Provides definitions for the various language-specific address spaces.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e, GlobalDecl gd)
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static cir::IntAttr getConstantIndexOrNull(mlir::Value idx)
static const Expr * getSimpleArrayDecayOperand(const Expr *e)
If the specified expr is a simple decay from an array to pointer, return the array subexpression.
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd)
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e, const VarDecl *vd)
static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf, mlir::Location beginLoc, mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd, mlir::Value thisValue)
static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd)
Defines the clang::Expr interface and subclasses for C++ expressions.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__device__ __2f16 b
__device__ __2f16 float c
static OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block)
cir::GetMemberOp createGetMember(mlir::Location loc, mlir::Type resultTy, mlir::Value base, llvm::StringRef name, unsigned index)
cir::PointerType getPointerTo(mlir::Type ty)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType BoolTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
CanQualType getCanonicalTagType(const TagDecl *TD) const
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4353
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4531
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4537
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4543
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2721
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2776
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2750
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:2764
SourceLocation getEndLoc() const
Definition Expr.h:2767
QualType getElementType() const
Definition TypeBase.h:3735
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4038
Expr * getLHS() const
Definition Expr.h:4088
Expr * getRHS() const
Definition Expr.h:4090
Opcode getOpcode() const
Definition Expr.h:4083
mlir::Value getPointer() const
Definition Address.h:95
mlir::Type getElementType() const
Definition Address.h:122
static Address invalid()
Definition Address.h:73
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
cir::TargetAddressSpaceAttr getAddressSpace() const
Definition Address.h:130
clang::CharUnits getAlignment() const
Definition Address.h:135
mlir::Type getType() const
Definition Address.h:114
bool isValid() const
Definition Address.h:74
mlir::Operation * getDefiningOp() const
Get the operation which defines this address.
Definition Address.h:138
An aggregate value slot.
IsDestructed_t
This is set to true if the slot might be aliased and it's not undefined behavior to access it through...
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
Address createElementBitCast(mlir::Location loc, Address addr, mlir::Type destType)
Cast the element type of the given address to a different type, preserving information like the align...
mlir::Value getArrayElement(mlir::Location arrayLocBegin, mlir::Location arrayLocEnd, mlir::Value arrayPtr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
Create a cir.ptr_stride operation to get access to an array element.
Abstract information about a function or function prototype.
Definition CIRGenCall.h:27
bool isPseudoDestructor() const
Definition CIRGenCall.h:123
void setFunctionPointer(mlir::Operation *functionPtr)
Definition CIRGenCall.h:185
const clang::FunctionDecl * getBuiltinDecl() const
Definition CIRGenCall.h:99
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CIRGenCall.h:127
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
unsigned getBuiltinID() const
Definition CIRGenCall.h:103
static CIRGenCallee forBuiltin(unsigned builtinID, const clang::FunctionDecl *builtinDecl)
Definition CIRGenCall.h:108
mlir::Operation * getFunctionPointer() const
Definition CIRGenCall.h:147
static CIRGenCallee forPseudoDestructor(const clang::CXXPseudoDestructorExpr *expr)
Definition CIRGenCall.h:117
An object to manage conditionally-evaluated expressions.
static ConstantEmission forValue(mlir::TypedAttr c)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitMemberExpr(const MemberExpr *e)
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
LValue emitLValueForLambdaField(const FieldDecl *field)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
const TargetCIRGenInfo & getTargetHooks() const
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
mlir::Operation * curFn
The current function or global initializer that is generated code for.
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Type convertTypeForMem(QualType t)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
static bool hasAggregateEvaluationKind(clang::QualType type)
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind op, bool isPre)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
LValue emitCallExprLValue(const clang::CallExpr *e)
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
mlir::MLIRContext & getMLIRContext()
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
void emitCXXThrowExpr(const CXXThrowExpr *e)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates an alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
CIRGenBuilderTy & getBuilder()
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
mlir::Value getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty={}, ForDefinition_t isForDefinition=NotForDefinition)
Return the mlir::Value for the address of the given global variable.
cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant)
This class handles record and union layout info while lowering AST types to CIR types.
cir::RecordType getCIRType() const
Return the "complete object" LLVM type associated with this record.
const CIRGenBitFieldInfo & getBitFieldInfo(const clang::FieldDecl *fd) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getCIRFieldNo(const clang::FieldDecl *fd) const
Return cir::RecordType element number that corresponds to the field FD.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
mlir::Type convertTypeForMem(clang::QualType, bool forBitField=false)
Convert type T into an mlir::Type.
mlir::Attribute emitAbstract(const Expr *e, QualType destType)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
AlignmentSource getAlignmentSource() const
void mergeForCast(const LValueBaseInfo &info)
bool isExtVectorElt() const
const clang::Qualifiers & getQuals() const
mlir::Value getExtVectorPointer() const
static LValue makeExtVectorElt(Address vecAddress, mlir::ArrayAttr elts, clang::QualType type, LValueBaseInfo baseInfo)
mlir::Value getVectorIdx() const
bool isVectorElt() const
Address getAddress() const
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
mlir::ArrayAttr getExtVectorElts() const
static LValue makeVectorElt(Address vecAddress, mlir::Value index, clang::QualType t, LValueBaseInfo baseInfo)
RValue asAggregateRValue() const
unsigned getVRQualifiers() const
clang::QualType getType() const
static LValue makeBitfield(Address addr, const CIRGenBitFieldInfo &info, clang::QualType type, LValueBaseInfo baseInfo)
Create a new object to represent a bit-field access.
mlir::Value getPointer() const
bool isVolatileQualified() const
bool isBitField() const
Address getVectorAddress() const
clang::CharUnits getAlignment() const
LValueBaseInfo getBaseInfo() const
bool isVolatile() const
const CIRGenBitFieldInfo & getBitFieldInfo() const
Address getBitFieldAddress() const
Address getExtVectorAddress() const
bool isSimple() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:256
virtual mlir::Value performAddrSpaceCast(CIRGenFunction &cgf, mlir::Value v, cir::TargetAddressSpaceAttr srcAddr, mlir::Type destTy, bool isNonNull=false) const
Perform address space cast of an expression of pointer type.
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:179
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2129
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1366
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2943
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3126
Expr * getCallee()
Definition Expr.h:3090
arg_range arguments()
Definition Expr.h:3195
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1602
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3676
CastKind getCastKind() const
Definition Expr.h:3720
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3763
bool changesVolatileQualification() const
Return.
Definition Expr.h:3810
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1950
Expr * getSubExpr()
Definition Expr.h:3726
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3276
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3605
bool isFileScope() const
Definition Expr.h:3637
const Expr * getInitializer() const
Definition Expr.h:3633
ConditionalOperator - The ?
Definition Expr.h:4391
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1474
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:487
ValueDecl * getDecl()
Definition Expr.h:1338
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1468
SourceLocation getLocation() const
Definition Expr.h:1346
SourceLocation getLocation() const
Definition DeclBase.h:439
DeclContext * getDeclContext()
Definition DeclBase.h:448
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:83
bool isGLValue() const
Definition Expr.h:287
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:444
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3085
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1545
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6564
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4415
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4447
const Expr * getBase() const
Definition Expr.h:6581
Represents a member of a struct/union/class.
Definition Decl.h:3160
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3263
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4826
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
bool isZeroSize(const ASTContext &Ctx) const
Determine if this field is a subobject of zero size, that is, either a zero-length bit-field or a fie...
Definition Decl.cpp:4766
Represents a function declaration or definition.
Definition Decl.h:2000
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5269
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4920
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4945
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4970
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3364
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3447
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3588
Expr * getBase() const
Definition Expr.h:3441
bool isArrow() const
Definition Expr.h:3548
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3559
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3654
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1228
bool isUnique() const
Definition Expr.h:1236
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2005
StringRef getIdentKindName() const
Definition Expr.h:2062
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2040
StringLiteral * getFunctionName()
Definition Expr.h:2049
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8428
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8342
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1179
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
Represents a struct/union/class.
Definition Decl.h:4324
Encodes a location in the source.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1799
bool isUnion() const
Definition Decl.h:3925
Exposes information about the current target.
Definition TargetInfo.h:226
virtual StringRef getABI() const
Get the ABI currently in use.
bool isVoidType() const
Definition TypeBase.h:8901
bool isBooleanType() const
Definition TypeBase.h:9031
bool isPackedVectorBoolType(const ASTContext &ctx) const
Definition Type.cpp:419
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9197
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8638
bool isFunctionPointerType() const
Definition TypeBase.h:8606
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2338
bool isConstantMatrixType() const
Definition TypeBase.h:8706
bool isPointerType() const
Definition TypeBase.h:8539
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9188
bool isReferenceType() const
Definition TypeBase.h:8563
bool isVariableArrayType() const
Definition TypeBase.h:8650
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isExtVectorBoolType() const
Definition TypeBase.h:8686
bool isAnyComplexType() const
Definition TypeBase.h:8674
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9074
bool isAtomicType() const
Definition TypeBase.h:8727
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2801
bool isFunctionType() const
Definition TypeBase.h:8535
bool isVectorType() const
Definition TypeBase.h:8678
bool isSubscriptableVectorType() const
Definition TypeBase.h:8698
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9121
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2355
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2326
static bool isPrefix(Opcode Op)
isPrefix - Return true if this is a prefix operation, like --x.
Definition Expr.h:2319
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
TLSKind getTLSKind() const
Definition Decl.cpp:2179
bool hasInit() const
Definition Decl.cpp:2409
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2377
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1184
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:952
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3967
Represents a GCC generic vector type.
Definition TypeBase.h:4176
Defines the clang::TargetInfo interface.
OverflowBehavior
cir::TargetAddressSpaceAttr toCIRTargetAddressSpace(mlir::MLIRContext &context, clang::LangAS langAS)
Definition CIRTypes.cpp:937
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static AlignmentSource getFieldAlignmentSource(AlignmentSource source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
bool isTargetAddressSpace(LangAS AS)
@ SC_Register
Definition Specifiers.h:257
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:342
@ SD_Static
Static storage duration.
Definition Specifiers.h:343
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:340
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:341
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:344
LangAS
Defines the address space values used by the address space qualifier of QualType.
U cast(CodeGen::Address addr)
Definition Address.h:327
LangAS getLangASFromTargetAS(unsigned TargetAS)
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:180
static bool weakRefReference()
static bool objCLifetime()
static bool emitLifetimeMarkers()
static bool opLoadEmitScalarRangeCheck()
static bool addressSpace()
static bool opAllocaNonGC()
static bool opAllocaOpenMPThreadPrivate()
static bool preservedAccessIndexRegion()
static bool mergeAllConstants()
static bool opLoadStoreTbaa()
static bool cgFPOptionsRAII()
static bool opCallChain()
static bool opAllocaImpreciseLifetime()
static bool opAllocaStaticLocal()
static bool opAllocaTLS()
static bool emitCheckedInBoundsGEP()
static bool attributeNoBuiltin()
static bool setObjCGCLValueClass()
static bool cirgenABIInfo()
static bool opLoadStoreObjC()
static bool opCallArgEvaluationOrder()
static bool lambdaCaptures()
static bool insertBuiltinUnpredictable()
static bool opCallMustTail()
static bool shouldReverseUnaryCondOnBoolExpr()
static bool tryEmitAsConstant()
static bool addressIsKnownNonNull()
static bool astVarDeclInterface()
static bool cgCapturedStmtInfo()
static bool opAllocaEscapeByReference()
static bool opLoadStoreNontemporal()
static bool opCallFnInfoOpts()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Record with information about how a bitfield should be accessed.
unsigned volatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned volatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
cir::TargetAddressSpaceAttr getCIRAllocaAddressSpace() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:612