// CIRGenExpr.cpp — excerpt captured from the clang 23.0.0git doxygen listing.
// Line-number artifacts and some source lines were lost in extraction; see
// the upstream file for the authoritative text.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "Address.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "TargetInfo.h"
19#include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h"
20#include "mlir/IR/BuiltinAttributes.h"
21#include "mlir/IR/Value.h"
22#include "clang/AST/Attr.h"
23#include "clang/AST/CharUnits.h"
24#include "clang/AST/Decl.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/ExprCXX.h"
33#include <optional>
34
35using namespace clang;
36using namespace clang::CIRGen;
37using namespace cir;
38
/// Get the address of a zero-sized field within a record. Zero-sized fields
/// (e.g. empty bases with [[no_unique_address]]) don't appear in the CIR
/// record layout, so we compute their address using the ASTContext field
/// offset and byte-level pointer arithmetic instead of cir.get_member.
// NOTE(review): this excerpt elides the first line of the signature and the
// start of the `offset` computation; consult the full file before editing.
                                       const FieldDecl *field) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
                           cgf.getContext().getFieldOffset(field));
  mlir::Type fieldType = cgf.convertType(field->getType());

  // A zero offset needs no arithmetic: reuse the base pointer retyped to the
  // field's type, keeping the base alignment.
  if (offset.isZero()) {
    return Address(builder.createPtrBitcast(base.getPointer(), fieldType),
                   base.getAlignment());
  }

  // Cast to byte pointer, stride by the field offset, then cast to the
  // field pointer type (CIR pointers are typed, so we need explicit casts
  // unlike OG's opaque-pointer GEP).
  mlir::Location loc = cgf.getLoc(field->getLocation());
  mlir::Value addr =
      builder.createPtrBitcast(base.getPointer(), builder.getUInt8Ty());
  addr = builder.createPtrStride(loc, addr,
                                 builder.getUInt64(offset.getQuantity(), loc));
  addr = builder.createPtrBitcast(addr, fieldType);
  // The alignment at `offset` may be smaller than the base alignment.
  return Address(addr, base.getAlignment().alignmentAtOffset(offset));
}
66
// NOTE(review): this excerpt elides the signature's first line, the
// zero-size-field guard condition, and the start of the `offset` computation.
                                            const FieldDecl *field,
                                            llvm::StringRef fieldName,
                                            unsigned fieldIndex) {
    // Zero-sized fields are absent from the CIR layout; delegate.
    return emitAddrOfZeroSizeField(*this, base, field);

  mlir::Location loc = getLoc(field->getLocation());

  // Retrieve layout information for both type resolution and alignment.
  const RecordDecl *rec = field->getParent();
  const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
  unsigned idx = layout.getCIRFieldNo(field);

  // For potentially-overlapping fields (e.g. [[no_unique_address]]), the
  // record stores the base subobject type (without tail padding) rather than
  // the complete object type. Use the record's member type for get_member,
  // then bitcast to the complete type for downstream use.
  //
  // For unions, all fields map to index 0, so we use the field's declared type
  // directly instead of looking up the member type from the layout.
  mlir::Type fieldType = convertType(field->getType());
  auto fieldPtr = cir::PointerType::get(fieldType);
  bool needsBitcast = false;

  if (!rec->isUnion() && field->isPotentiallyOverlapping()) {
    mlir::Type memberType = layout.getCIRType().getMembers()[idx];
    fieldPtr = cir::PointerType::get(memberType);
    needsBitcast = true;
  }

  // For most cases fieldName is the same as field->getName() but for lambdas,
  // which do not currently carry the name, so it can be passed down from the
  // CaptureStmt.
  mlir::Value addr = builder.createGetMember(loc, fieldPtr, base.getPointer(),
                                             fieldName, fieldIndex);

  // If the field is potentially overlapping, the record member uses the base
  // subobject type. Cast to the complete object pointer type expected by
  // callers (analogous to OG's opaque pointer behavior).
  if (needsBitcast)
    addr = builder.createPtrBitcast(addr, fieldType);

      layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
  return Address(addr, base.getAlignment().alignmentAtOffset(offset));
}
114
/// Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
// NOTE(review): this excerpt elides the signature's first line and several
// interior lines (mostly missing-feature assertions and trailing call
// arguments); consult the full file before editing this function.
                                                 LValueBaseInfo *baseInfo) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(expr->getType()->isPointerType() ||
         expr->getType()->isObjCObjectPointerType());
  expr = expr->IgnoreParens();

  // Casts:
  if (auto const *ce = dyn_cast<CastExpr>(expr)) {
    if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
      cgm.emitExplicitCastExprType(ece);

    switch (ce->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion: {
      if (const auto *ptrTy =
              ce->getSubExpr()->getType()->getAs<PointerType>()) {
        // A void* source carries no useful pointee alignment; fall through
        // to the type-based default at the bottom of the function.
        if (ptrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo innerBaseInfo;
        Address addr =
            emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
        if (baseInfo)
          *baseInfo = innerBaseInfo;

        if (isa<ExplicitCastExpr>(ce)) {
          LValueBaseInfo targetTypeBaseInfo;

          const QualType pointeeType = expr->getType()->getPointeeType();
          const CharUnits align =
              cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);

          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (baseInfo)
              baseInfo->mergeForCast(targetTypeBaseInfo);
            addr = Address(addr.getPointer(), addr.getElementType(), align);
          }
        }


        // Retype the address to the cast's destination pointee type.
        const mlir::Type eltTy =
            convertTypeForMem(expr->getType()->getPointeeType());
        addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
                                                 addr, eltTy);

        return addr;
      }
      break;
    }

    // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
    case CK_ArrayToPointerDecay:
      return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);

    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
      const CXXRecordDecl *derived =
          ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      // NOTE(review): one argument line of this call is elided in this
      // excerpt.
      return getAddressOfBaseClass(addr, derived, ce->path(),
                                   ce->getExprLoc());
    }

    case CK_AnyPointerToBlockPointerCast:
    case CK_BaseToDerived:
    case CK_BaseToDerivedMemberPointer:
    case CK_BlockPointerToObjCPointerCast:
    case CK_BuiltinFnToFnPtr:
    case CK_CPointerToObjCPointerCast:
    case CK_DerivedToBaseMemberPointer:
    case CK_Dynamic:
    case CK_FunctionToPointerDecay:
    case CK_IntegralToPointer:
    case CK_LValueToRValue:
    case CK_LValueToRValueBitCast:
    case CK_NullToMemberPointer:
    case CK_NullToPointer:
    case CK_ReinterpretMemberPointer:
      // Common pointer conversions, nothing to do here.
      // TODO: Is there any reason to treat base-to-derived conversions
      // specially?
      break;

    case CK_ARCConsumeObject:
    case CK_ARCExtendBlockObject:
    case CK_ARCProduceObject:
    case CK_ARCReclaimReturnedObject:
    case CK_AtomicToNonAtomic:
    case CK_BooleanToSignedIntegral:
    case CK_ConstructorConversion:
    case CK_CopyAndAutoreleaseBlockObject:
    case CK_Dependent:
    case CK_FixedPointCast:
    case CK_FixedPointToBoolean:
    case CK_FixedPointToFloating:
    case CK_FixedPointToIntegral:
    case CK_FloatingCast:
    case CK_FloatingComplexCast:
    case CK_FloatingComplexToBoolean:
    case CK_FloatingComplexToIntegralComplex:
    case CK_FloatingComplexToReal:
    case CK_FloatingRealToComplex:
    case CK_FloatingToBoolean:
    case CK_FloatingToFixedPoint:
    case CK_FloatingToIntegral:
    case CK_HLSLAggregateSplatCast:
    case CK_HLSLArrayRValue:
    case CK_HLSLElementwiseCast:
    case CK_HLSLVectorTruncation:
    case CK_HLSLMatrixTruncation:
    case CK_IntToOCLSampler:
    case CK_IntegralCast:
    case CK_IntegralComplexCast:
    case CK_IntegralComplexToBoolean:
    case CK_IntegralComplexToFloatingComplex:
    case CK_IntegralComplexToReal:
    case CK_IntegralRealToComplex:
    case CK_IntegralToBoolean:
    case CK_IntegralToFixedPoint:
    case CK_IntegralToFloating:
    case CK_LValueBitCast:
    case CK_MatrixCast:
    case CK_MemberPointerToBoolean:
    case CK_NonAtomicToAtomic:
    case CK_ObjCObjectLValueCast:
    case CK_PointerToBoolean:
    case CK_PointerToIntegral:
    case CK_ToUnion:
    case CK_ToVoid:
    case CK_UserDefinedConversion:
    case CK_VectorSplat:
    case CK_ZeroToOCLOpaqueType:
      llvm_unreachable("unexpected cast for emitPointerWithAlignment");
    }
  }

  // Unary &
  if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
    // TODO(cir): maybe we should use a CIR unary op for pointers here instead.
    if (uo->getOpcode() == UO_AddrOf) {
      LValue lv = emitLValue(uo->getSubExpr());
      if (baseInfo)
        *baseInfo = lv.getBaseInfo();
      return lv.getAddress();
    }
  }

  // std::addressof and variants.
  if (auto const *call = dyn_cast<CallExpr>(expr)) {
    switch (call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      LValue lv = emitLValue(call->getArg(0));
      if (baseInfo)
        *baseInfo = lv.getBaseInfo();
      return lv.getAddress();
    }
    }
  }

  // Otherwise, use the alignment of the type.
      emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
      /*forPointeeType=*/true, baseInfo);
}
298
// NOTE(review): this excerpt elides the signature's first line (taking an
// RValue src and LValue dst) and the bitfield-store call before the bare
// `return` below.
                                       bool isInit) {
  if (!dst.isSimple()) {
    if (dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element
      const mlir::Location loc = dst.getVectorPointer().getLoc();
      const mlir::Value vector =
          builder.createLoad(loc, dst.getVectorAddress());
      const mlir::Value newVector = cir::VecInsertOp::create(
          builder, loc, vector, src.getValue(), dst.getVectorIdx());
      builder.createStore(loc, newVector, dst.getVectorAddress());
      return;
    }

    assert(dst.isBitField() && "Unknown LValue type");
      return;

    // Any remaining non-simple l-value kind is not implemented yet.
    cgm.errorNYI(dst.getPointer().getLoc(),
                 "emitStoreThroughLValue: non-simple lvalue");
    return;
  }


  assert(src.isScalar() && "Can't emit an aggregate store with this method");
  emitStoreOfScalar(src.getValue(), dst, isInit);
}
327
// NOTE(review): two lines are elided in this excerpt (a trailing argument of
// emitLoadOfReferenceLValue and one line before the final return).
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
                                      const VarDecl *vd) {
  QualType t = e->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "emitGlobalVarDeclLValue: thread_local variable");

  // Check if the variable is marked as declare target with link clause in
  // device codegen.
  if (cgf.getLangOpts().OpenMP)
    cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");

  // Traditional LLVM codegen handles thread local separately, CIR handles
  // as part of getAddrOfGlobalVar.
  mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);

  // Retype the global's pointer to the declared memory type when it differs,
  // preserving the address space.
  mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
  cir::PointerType realPtrTy = cir::PointerType::get(
      realVarTy, mlir::cast<cir::PointerType>(v.getType()).getAddrSpace());
  if (realPtrTy != v.getType())
    v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);

  CharUnits alignment = cgf.getContext().getDeclAlign(vd);
  Address addr(v, realVarTy, alignment);
  LValue lv;
  // Reference-typed globals are loaded through to the referenced object.
  if (vd->getType()->isReferenceType())
    lv = cgf.emitLoadOfReferenceLValue(addr, cgf.getLoc(e->getSourceRange()),
  else
    lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
  return lv;
}
363
// NOTE(review): a few interior lines (apparently missing-feature assertions)
// are elided in this excerpt.
void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
                                       bool isVolatile, QualType ty,
                                       LValueBaseInfo baseInfo, bool isInit,
                                       bool isNontemporal) {

  if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (clangVecTy->isExtVectorBoolType())
      cgm.errorNYI(addr.getPointer().getLoc(),
                   "emitStoreOfScalar ExtVectorBoolType");

    // Handle vectors of size 3 like size 4 for better performance.
    const mlir::Type elementType = addr.getElementType();
    const auto vecTy = cast<cir::VectorType>(elementType);

    // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it upstreamed
    if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
      cgm.errorNYI(addr.getPointer().getLoc(),
                   "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
  }

  // Convert the value to its memory representation before storing.
  value = emitToMemory(value, ty);

  // Atomic types, or l-values that require atomic access, go through the
  // atomic store path instead of a plain cir.store.
  LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
  if (ty->isAtomicType() ||
      (!isInit && isLValueSuitableForInlineAtomic(atomicLValue))) {
    emitAtomicStore(RValue::get(value), atomicLValue, isInit);
    return;
  }

  // Update the alloca with more info on initialization.
  assert(addr.getPointer() && "expected pointer to exist");
  auto srcAlloca = addr.getDefiningOp<cir::AllocaOp>();
  if (currVarDecl && srcAlloca) {
    const VarDecl *vd = currVarDecl;
    assert(vd && "VarDecl expected");
    if (vd->hasInit())
      srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
  }

  assert(currSrcLoc && "must pass in source location");
  builder.createStore(*currSrcLoc, value, addr, isVolatile);

  if (isNontemporal) {
    cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
    return;
  }

}
416
417// TODO: Replace this with a proper TargetInfo function call.
418/// Helper method to check if the underlying ABI is AAPCS
419static bool isAAPCS(const TargetInfo &targetInfo) {
420 return targetInfo.getABI().starts_with("aapcs");
421}
422
// NOTE(review): the signature's first line (taking an RValue src) is elided
// in this excerpt.
                                                     LValue dst) {

  const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
  mlir::Type resLTy = convertTypeForMem(dst.getType());
  Address ptr = dst.getBitFieldAddress();

  // AAPCS-specific volatile bit-field handling is only enabled when the
  // codegen option allows it and the volatile storage info is populated.
  // TODO(review): `useVoaltile` is a typo for `useVolatile` (also in
  // emitLoadOfBitfieldLValue); rename both together.
  bool useVoaltile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
                     dst.isVolatileQualified() &&
                     info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());

  assert(currSrcLoc && "must pass in source location");

  return builder.createSetBitfield(*currSrcLoc, resLTy, ptr,
                                   ptr.getElementType(), src.getValue(), info,
                                   dst.isVolatileQualified(), useVoaltile);
}
440
// NOTE(review): the signature's first line and one line before the final
// return are elided in this excerpt.
  const CIRGenBitFieldInfo &info = lv.getBitFieldInfo();

  // Get the output type.
  mlir::Type resLTy = convertType(lv.getType());
  Address ptr = lv.getBitFieldAddress();

  // TODO(review): `useVoaltile` is a typo for `useVolatile` (also in
  // emitStoreThroughBitfieldLValue); rename both together.
  bool useVoaltile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
                     isAAPCS(cgm.getTarget());

  mlir::Value field =
      builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
                                info, lv.isVolatile(), useVoaltile);
  return RValue::get(field);
}
457
// NOTE(review): the signature's first line and two interior lines (including
// the start of the `offset` computation) are elided in this excerpt.
                                              const FieldDecl *field,
                                              mlir::Type fieldType,
                                              unsigned index) {
  mlir::Location loc = getLoc(field->getLocation());
  cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
  // For unions the AST field index is used; otherwise the CIR layout index.
  cir::GetMemberOp sea = getBuilder().createGetMember(
      loc, fieldPtr, base.getPointer(), field->getName(),
      rec.isUnion() ? field->getFieldIndex() : index);
      rec.getElementOffset(cgm.getDataLayout().layout, index));
  return Address(sea, base.getAlignment().alignmentAtOffset(offset));
}
472
// NOTE(review): the signature's first line and a few interior lines
// (including the start of the `fieldType` computation) are elided in this
// excerpt.
                                            const FieldDecl *field) {
  LValueBaseInfo baseInfo = base.getBaseInfo();
  const CIRGenRecordLayout &layout =
      cgm.getTypes().getCIRGenRecordLayout(field->getParent());
  const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);


  unsigned idx = layout.getCIRFieldNo(field);
  Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);

  // Ensure the address uses the bit-field's storage type.
  mlir::Location loc = getLoc(field->getLocation());
  if (addr.getElementType() != info.storageType)
    addr = builder.createElementBitCast(loc, addr, info.storageType);

  QualType fieldType =
  // TODO(cir): Support TBAA for bit fields.
  LValueBaseInfo fieldBaseInfo(baseInfo.getAlignmentSource());
  return LValue::makeBitfield(addr, info, fieldType, fieldBaseInfo);
}
496
// NOTE(review): the signature line and several interior lines (missing-
// feature assertions and the guard condition of the __weak branch) are
// elided in this excerpt; consult the full file before editing.
  LValueBaseInfo baseInfo = base.getBaseInfo();

  // Bit-fields take the specialized path (storage unit + bit offset).
  if (field->isBitField())
    return emitLValueForBitField(base, field);

  QualType fieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
  LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));

  Address addr = base.getAddress();
  if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
    if (cgm.getCodeGenOpts().StrictVTablePointers &&
        classDecl->isDynamicClass()) {
      cgm.errorNYI(field->getSourceRange(),
                   "emitLValueForField: strict vtable for dynamic class");
    }
  }

  unsigned recordCVR = base.getVRQualifiers();

  // Lambda capture fields don't carry a name; look one up from the map.
  llvm::StringRef fieldName = field->getName();
  unsigned fieldIndex;
  if (cgm.lambdaFieldToName.count(field))
    fieldName = cgm.lambdaFieldToName[field];

  // Empty fields don't have entries in the record layout, so handle them
  // separately. They just use the base address directly with the right type.
  if (!rec->isUnion() && isEmptyFieldForLayout(getContext(), field)) {
    addr = emitAddrOfZeroSizeField(*this, addr, field);
    LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
    lv.getQuals().addCVRQualifiers(recordCVR);
    return lv;
  }

  if (rec->isUnion())
    fieldIndex = field->getFieldIndex();
  else {
    const CIRGenRecordLayout &layout =
        cgm.getTypes().getCIRGenRecordLayout(field->getParent());
    fieldIndex = layout.getCIRFieldNo(field);
  }

  addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);

  // If this is a reference field, load the reference right now.
  if (fieldType->isReferenceType()) {
    LValue refLVal = makeAddrLValue(addr, fieldType, fieldBaseInfo);
    if (recordCVR & Qualifiers::Volatile)
      refLVal.getQuals().addVolatile();
    addr = emitLoadOfReference(refLVal, getLoc(field->getSourceRange()),
                               &fieldBaseInfo);

    // Qualifiers on the struct don't apply to the referencee.
    recordCVR = 0;
    fieldType = fieldType->getPointeeType();
  }

  if (field->hasAttr<AnnotateAttr>()) {
    cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
    return LValue();
  }

  LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
  lv.getQuals().addCVRQualifiers(recordCVR);

  // __weak attribute on a field is ignored.
    cgm.errorNYI(field->getSourceRange(),
                 "emitLValueForField: __weak attribute");
    return LValue();
  }

  return lv;
}
576
// NOTE(review): the signature's first line and one line before the final
// return are elided in this excerpt. Unlike emitLValueForField, this does
// NOT load through reference-typed fields — it yields the storage address
// itself (the code below skips emitLoadOfReference).
    LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName) {
  QualType fieldType = field->getType();

  // Non-reference fields behave exactly like a normal field access.
  if (!fieldType->isReferenceType())
    return emitLValueForField(base, field);

  Address v = base.getAddress();
  if (isEmptyFieldForLayout(getContext(), field)) {
    v = emitAddrOfZeroSizeField(*this, v, field);
  } else {
    const CIRGenRecordLayout &layout =
        cgm.getTypes().getCIRGenRecordLayout(field->getParent());
    unsigned fieldIndex = layout.getCIRFieldNo(field);
    v = emitAddrOfFieldStorage(v, field, fieldName, fieldIndex);
  }

  // Make sure that the address is pointing to the right type.
  mlir::Type memTy = convertTypeForMem(fieldType);
  v = builder.createElementBitCast(getLoc(field->getSourceRange()), v, memTy);

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type. This
  // should be similar to what we do in EmitLValueForField().
  LValueBaseInfo baseInfo = base.getBaseInfo();
  AlignmentSource fieldAlignSource = baseInfo.getAlignmentSource();
  LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(fieldAlignSource));
  return makeAddrLValue(v, fieldType, fieldBaseInfo);
}
607
608/// Converts a scalar value from its primary IR type (as returned
609/// by ConvertType) to its load/store type.
610mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
611 if (auto *atomicTy = ty->getAs<AtomicType>())
612 ty = atomicTy->getValueType();
613
614 if (ty->isExtVectorBoolType()) {
615 cgm.errorNYI("emitToMemory: extVectorBoolType");
616 }
617
618 // Unlike in classic codegen CIR, bools are kept as `cir.bool` and BitInts are
619 // kept as `cir.int<N>` until further lowering
620
621 return value;
622}
623
/// Converts a scalar value from its load/store type back to its primary IR
/// type; currently the value is returned unchanged.
// NOTE(review): the guard condition before the errorNYI call is elided in
// this excerpt (presumably a packed boolean-vector check — confirm).
mlir::Value CIRGenFunction::emitFromMemory(mlir::Value value, QualType ty) {
  if (auto *atomicTy = ty->getAs<AtomicType>())
    ty = atomicTy->getValueType();

    cgm.errorNYI("emitFromMemory: PackedVectorBoolType");
  }

  return value;
}
634
635void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
636 bool isInit) {
637 if (lvalue.getType()->isConstantMatrixType()) {
638 assert(0 && "NYI: emitStoreOfScalar constant matrix type");
639 return;
640 }
641
642 emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
643 lvalue.getType(), lvalue.getBaseInfo(), isInit,
644 /*isNontemporal=*/false);
645}
646
// NOTE(review): several interior lines (apparently missing-feature
// assertions and part of a comment) are elided in this excerpt.
mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
                                             QualType ty, SourceLocation loc,
                                             LValueBaseInfo baseInfo) {
  // Traditional LLVM codegen handles thread local separately, CIR handles
  // as part of getAddrOfGlobalVar (GetGlobalOp).
  mlir::Type eltTy = addr.getElementType();

  if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
    if (clangVecTy->isExtVectorBoolType()) {
      cgm.errorNYI(loc, "emitLoadOfScalar: ExtVectorBoolType");
      return nullptr;
    }

    const auto vecTy = cast<cir::VectorType>(eltTy);

    // Handle vectors of size 3 like size 4 for better performance.
    if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
      cgm.errorNYI(addr.getPointer().getLoc(),
                   "emitLoadOfScalar Vec3 & PreserveVec3Type disabled");
  }

  // Atomic loads are not lowered yet.
  LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
  if (ty->isAtomicType() || isLValueSuitableForInlineAtomic(atomicLValue))
    cgm.errorNYI("emitLoadOfScalar: load atomic");

  if (mlir::isa<cir::VoidType>(eltTy))
    cgm.errorNYI(loc, "emitLoadOfScalar: void type");


  mlir::Value loadOp = builder.createLoad(getLoc(loc), addr, isVolatile);
  if (!ty->isBooleanType() && ty->hasBooleanRepresentation())
    cgm.errorNYI("emitLoadOfScalar: boolean type with boolean representation");

  return loadOp;
}
685
// NOTE(review): the signature's first line (taking an LValue) and two lines
// are elided in this excerpt; this overload forwards the l-value's
// components to the Address-based emitLoadOfScalar.
                                             SourceLocation loc) {
  return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getType(), loc, lvalue.getBaseInfo());
}
693
/// Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
// NOTE(review): the signature line and the ext-vector-element return
// statement are elided in this excerpt.
  assert(!lv.getType()->isFunctionType());
  assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");

  // Dispatch on the l-value kind; each kind has its own load strategy.
  if (lv.isBitField())
    return emitLoadOfBitfieldLValue(lv, loc);

  if (lv.isSimple())
    return RValue::get(emitLoadOfScalar(lv, loc));

  if (lv.isVectorElt()) {
    // Load the whole vector, then extract the requested lane.
    const mlir::Value load =
        builder.createLoad(getLoc(loc), lv.getVectorAddress());
    return RValue::get(cir::VecExtractOp::create(builder, getLoc(loc), load,
                                                 lv.getVectorIdx()));
  }

  if (lv.isExtVectorElt())

  cgm.errorNYI(loc, "emitLoadOfLValue");
  return RValue::get(nullptr);
}
720
721int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx,
722 const mlir::ArrayAttr elts) {
723 auto elt = mlir::cast<mlir::IntegerAttr>(elts[idx]);
724 return elt.getInt();
725}
726
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
// NOTE(review): the signature line and the declaration of `mask` are elided
// in this excerpt.
  mlir::Location loc = lv.getExtVectorPointer().getLoc();
  mlir::Value vec = builder.createLoad(loc, lv.getExtVectorAddress());

  // HLSL allows treating scalars as one-element vectors. Converting the scalar
  // IR value to a vector here allows the rest of codegen to behave as normal.
  if (getLangOpts().HLSL && !mlir::isa<cir::VectorType>(vec.getType())) {
    cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: HLSL");
    return {};
  }

  const mlir::ArrayAttr elts = lv.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element. Just codegen as an extractelement.
  const auto *exprVecTy = lv.getType()->getAs<clang::VectorType>();
  if (!exprVecTy) {
    int64_t indexValue = getAccessedFieldNo(0, elts);
    cir::ConstantOp index =
        builder.getConstInt(loc, builder.getSInt64Ty(), indexValue);
    return RValue::get(cir::VecExtractOp::create(builder, loc, vec, index));
  }

  // Always use shuffle vector to try to retain the original program structure
  for (auto i : llvm::seq<unsigned>(0, exprVecTy->getNumElements()))
    mask.push_back(getAccessedFieldNo(i, elts));

  cir::VecShuffleOp resultVec = builder.createVecShuffle(loc, vec, mask);
  if (lv.getType()->isExtVectorBoolType()) {
    cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: ExtVectorBoolType");
    return {};
  }

  return RValue::get(resultVec);
}
765
LValue
// NOTE(review): the remainder of the signature (this handles the `.*` and
// `->*` pointer-to-member binary operators) and one interior line are
// elided in this excerpt.
  assert((e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) &&
         "unexpected binary operator opcode");

  // `.*` (PtrMemD) takes an l-value base; `->*` (PtrMemI) a pointer base.
  Address baseAddr = Address::invalid();
  if (e->getOpcode() == BO_PtrMemD)
    baseAddr = emitLValue(e->getLHS()).getAddress();
  else
    baseAddr = emitPointerWithAlignment(e->getLHS());

  const auto *memberPtrTy = e->getRHS()->getType()->castAs<MemberPointerType>();

  mlir::Value memberPtr = emitScalarExpr(e->getRHS());

  LValueBaseInfo baseInfo;
  Address memberAddr = emitCXXMemberDataPointerAddress(e, baseAddr, memberPtr,
                                                       memberPtrTy, &baseInfo);

  return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo);
}
788
/// Generates lvalue for partial ext_vector access.
// NOTE(review): the signature's first line is elided in this excerpt —
// presumably an Address-returning CIRGenFunction member taking the LValue
// `lv`; confirm against the full file.
                                       mlir::Location loc) {
  Address vectorAddress = lv.getExtVectorAddress();
  QualType elementTy = lv.getType()->castAs<VectorType>()->getElementType();
  mlir::Type vectorElementTy = cgm.getTypes().convertType(elementTy);
  // View the vector storage as an array of its element type.
  Address castToPointerElement =
      vectorAddress.withElementType(builder, vectorElementTy);

  mlir::ArrayAttr extVecElts = lv.getExtVectorElts();
  unsigned idx = getAccessedFieldNo(0, extVecElts);
  mlir::Value idxValue =
      builder.getConstInt(loc, mlir::cast<cir::IntType>(ptrDiffTy), idx);

  mlir::Value elementValue = builder.getArrayElement(
      loc, loc, castToPointerElement.getPointer(), vectorElementTy, idxValue,
      /*shouldDecay=*/false);

  // Alignment of the accessed element is the base alignment reduced to the
  // element's byte offset.
  const CharUnits eltSize = getContext().getTypeSizeInChars(elementTy);
  const CharUnits alignment =
      castToPointerElement.getAlignment().alignmentAtOffset(idx * eltSize);
  return Address(elementValue, vectorElementTy, alignment);
}
812
/// Get the cir.func for the function declaration \p gd.
// NOTE(review): one line between the signature and the return is elided in
// this excerpt.
static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
  return cgm.getAddrOfFunction(gd);
}
817
// NOTE(review): the signature's first line is elided in this excerpt; the
// helper forwards a captured field access to emitLValueForLambdaField.
                                       mlir::Value thisValue) {
  return cgf.emitLValueForLambdaField(fd, thisValue);
}
822
/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
///
// NOTE(review): the signature's first line and the initializer of
// `lambdaTagType` are elided in this excerpt.
                                             mlir::Value thisValue) {
  bool hasExplicitObjectParameter = false;
  const auto *methD = dyn_cast_if_present<CXXMethodDecl>(curCodeDecl);
  LValue lambdaLV;
  if (methD) {
    hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction();
    assert(methD->getParent()->isLambda());
    assert(methD->getParent() == field->getParent());
  }
  if (hasExplicitObjectParameter) {
    cgm.errorNYI(field->getSourceRange(), "ExplicitObjectMemberFunction");
  } else {
    // Build an l-value for `*this` at the lambda class type.
    QualType lambdaTagType =
    lambdaLV = makeNaturalAlignAddrLValue(thisValue, lambdaTagType);
  }
  return emitLValueForField(lambdaLV, field);
}
845
849
// NOTE(review): two interior lines (one after `align`, and the trailing
// argument of the final makeAddrLValue call) are elided in this excerpt.
static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
                                     GlobalDecl gd) {
  const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
  cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
  mlir::Location loc = cgf.getLoc(e->getSourceRange());
  CharUnits align = cgf.getContext().getDeclAlign(fd);


  // Take the function's address via cir.get_global on its symbol name.
  mlir::Type fnTy = funcOp.getFunctionType();
  mlir::Type ptrTy = cir::PointerType::get(fnTy);
  mlir::Value addr = cir::GetGlobalOp::create(cgf.getBuilder(), loc, ptrTy,
                                              funcOp.getSymName());

  // If the emitted function's type differs from the declared type, bitcast
  // the pointer to the declared function pointer type.
  if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
    fnTy = cgf.convertType(fd->getType());
    ptrTy = cir::PointerType::get(fnTy);

    addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
                               cir::CastKind::bitcast, addr);
  }

  return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
}
875
/// Determine whether we can emit a reference to \p vd from the current
/// context, despite not necessarily having seen an odr-use of the variable in
/// this context.
/// TODO(cir): This could be shared with classic codegen.
// NOTE(review): the signature's first line and the enclosing-scope guard
// condition (before the first `return false`) are elided in this excerpt.
                                                const DeclRefExpr *e,
                                                const VarDecl *vd) {
  // For a variable declared in an enclosing scope, do not emit a spurious
  // reference even if we have a capture, as that will emit an unwarranted
  // reference to our capture state, and will likely generate worse code than
  // emitting a local copy.
    return false;

  // For a local declaration declared in this function, we can always reference
  // it even if we don't have an odr-use.
  if (vd->hasLocalStorage()) {
    return vd->getDeclContext() ==
           dyn_cast_or_null<DeclContext>(cgf.curCodeDecl);
  }

  // For a global declaration, we can emit a reference to it if we know
  // for sure that we are able to emit a definition of it.
  vd = vd->getDefinition(cgf.getContext());
  if (!vd)
    return false;

  // Don't emit a spurious reference if it might be to a variable that only
  // exists on a different device / target.
  // FIXME: This is unnecessarily broad. Check whether this would actually be a
  // cross-target reference.
  if (cgf.getLangOpts().OpenMP || cgf.getLangOpts().CUDA ||
      cgf.getLangOpts().OpenCL) {
    return false;
  }

  // We can emit a spurious reference only if the linkage implies that we'll
  // be emitting a non-interposable symbol that will be retained until link
  // time.
  switch (cgf.cgm.getCIRLinkageVarDefinition(vd, /*IsConstant=*/false)) {
  case cir::GlobalLinkageKind::ExternalLinkage:
  case cir::GlobalLinkageKind::LinkOnceODRLinkage:
  case cir::GlobalLinkageKind::WeakODRLinkage:
  case cir::GlobalLinkageKind::InternalLinkage:
  case cir::GlobalLinkageKind::PrivateLinkage:
    return true;
  default:
    return false;
  }
}
926
// Lower a DeclRefExpr to an l-value.
// NOTE(review): the rendered listing elides some original lines (the
// signature and e.g. lines 976, 980-981, 987-992, 1013-1015); comments below
// describe only the visible code.
 928  const NamedDecl *nd = e->getDecl();
 929  QualType ty = e->getType();
 930
 931  assert(e->isNonOdrUse() != NOUR_Unevaluated &&
 932         "should not emit an unevaluated operand");
 933
 934  if (const auto *vd = dyn_cast<VarDecl>(nd)) {
 935    // Global Named registers access via intrinsics only
 936    if (vd->getStorageClass() == SC_Register && vd->hasAttr<AsmLabelAttr>() &&
 937        !vd->isLocalVarDecl()) {
 938      cgm.errorNYI(e->getSourceRange(),
 939                   "emitDeclRefLValue: Global Named registers access");
 940      return LValue();
 941    }
 942
      // Non-ODR constant uses: materialize the evaluated constant instead of
      // referencing the variable itself.
 943    if (e->isNonOdrUse() == NOUR_Constant &&
 944        (vd->getType()->isReferenceType() ||
 945         !canEmitSpuriousReferenceToVariable(*this, e, vd))) {
        // getAnyInitializer(vd) also redirects vd to the declaration that
        // actually carries the initializer.
 946      vd->getAnyInitializer(vd);
 947      mlir::Attribute val = ConstantEmitter(*this).emitAbstract(
 948          e->getLocation(), *vd->evaluateValue(), vd->getType());
 949      assert(val && "failed to emit constant expression");
 950
 951      Address addr = Address::invalid();
 952      if (!vd->getType()->isReferenceType()) {
 953        // Spill the constant value to a global.
 954        addr = cgm.createUnnamedGlobalFrom(*vd, val,
 955                                           getContext().getDeclAlign(vd));
 956        mlir::Type varTy = getTypes().convertTypeForMem(vd->getType());
 957        auto ptrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
 958        if (ptrTy.getPointee() != varTy) {
 959          addr = addr.withElementType(builder, varTy);
 960        }
 961      } else {
 962        // Should we be using the alignment of the constant pointer we emitted?
 963        CharUnits alignment = cgm.getNaturalTypeAlignment(
 964            e->getType(), /*baseInfo=*/nullptr, /*forPointeeType=*/true);
 965        // Classic codegen passes TBAA as null-ptr to the above function, so it
 966        // probably needs to deal with that.
 968        mlir::Value ptrVal = getBuilder().getConstant(
 969            getLoc(e->getSourceRange()), mlir::cast<mlir::TypedAttr>(val));
 970        addr = makeNaturalAddressForPointer(ptrVal, ty, alignment);
 971      }
 972      return makeAddrLValue(addr, ty, AlignmentSource::Decl);
 973    }
 974
 975    // Check for captured variables.
 977      vd = vd->getCanonicalDecl();
 978      if (FieldDecl *fd = lambdaCaptureFields.lookup(vd))
 979        return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
 982    }
 983  }
 984
 985  if (const auto *vd = dyn_cast<VarDecl>(nd)) {
 986    // Checks for omitted feature handling
 993
 994    // Check if this is a global variable
 995    if (vd->hasLinkage() || vd->isStaticDataMember())
 996      return emitGlobalVarDeclLValue(*this, e, vd);
 997
 998    Address addr = Address::invalid();
 999
1000    // The variable should generally be present in the local decl map.
1001    auto iter = localDeclMap.find(vd);
1002    if (iter != localDeclMap.end()) {
1003      addr = iter->second;
1004    } else {
1005      // Otherwise, it might be static local we haven't emitted yet for some
1006      // reason; most likely, because it's in an outer function.
1007      cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: static local");
1008    }
1009
1010    // Drill into reference types.
1011    LValue lv =
1012        vd->getType()->isReferenceType()
1016
1017    // Statics are defined as globals, so they are not included in the
1018    // function's symbol table.
1019    assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
1020           "non-static locals should be already mapped");
1021
1022    return lv;
1023  }
1024
1025  if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
1028      cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
1029      return LValue();
1030    }
1031    return emitLValue(bd->getBinding());
1032  }
1033
1034  if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
1035    LValue lv = emitFunctionDeclLValue(*this, e, fd);
1036
1037    // Emit debuginfo for the function declaration if the target wants to.
1038    if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
1040
1041    return lv;
1042  }
1043
1044  cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
1045  return LValue();
1046}
1047
// Evaluate an expression and convert the result to a boolean (i1) value.
// NOTE(review): the signature line is elided in this rendering; member
// pointers are NYI and complex values take the (partially elided) path at
// the bottom.
1049  QualType boolTy = getContext().BoolTy;
1050  SourceLocation loc = e->getExprLoc();
1051
1053  if (e->getType()->getAs<MemberPointerType>()) {
1054    cgm.errorNYI(e->getSourceRange(),
1055                 "evaluateExprAsBool: member pointer type");
      // Placeholder value so callers can keep going after the NYI diagnostic.
1056    return createDummyValue(getLoc(loc), boolTy);
1057  }
1058
    // Honor any pragma-level floating-point options while evaluating e.
1059  CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1060  if (!e->getType()->isAnyComplexType())
1061    return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);
1062
1064                                   loc);
1065}
1066
// Lower a unary operator used in l-value position (deref, __real/__imag,
// pre-increment/decrement). NOTE(review): some original lines are elided in
// this rendering (e.g. the signature and the pre-inc/dec emission calls).
1068  UnaryOperatorKind op = e->getOpcode();
1069
1070  // __extension__ doesn't affect lvalue-ness.
1071  if (op == UO_Extension)
1072    return emitLValue(e->getSubExpr());
1073
1074  switch (op) {
1075  case UO_Deref: {
1077    assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1078
1080    LValueBaseInfo baseInfo;
1081    Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);
1082
1083    // Tag 'load' with deref attribute.
1084    // FIXME: This misses some dereference cases and has problematic
1085    // interactions with other operators.
1086    if (auto loadOp = addr.getDefiningOp<cir::LoadOp>())
1087      loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));
1088
1089    LValue lv = makeAddrLValue(addr, t, baseInfo);
1092    return lv;
1093  }
1094  case UO_Real:
1095  case UO_Imag: {
1096    LValue lv = emitLValue(e->getSubExpr());
1097    assert(lv.isSimple() && "real/imag on non-ordinary l-value");
1098
1099    // __real is valid on scalars. This is a faster way of testing that.
1100    // __imag can only produce an rvalue on scalars.
1101    if (e->getOpcode() == UO_Real &&
1102        !mlir::isa<cir::ComplexType>(lv.getAddress().getElementType())) {
1103      assert(e->getSubExpr()->getType()->isArithmeticType());
1104      return lv;
1105    }
1106
1108    QualType elemTy = exprTy->castAs<clang::ComplexType>()->getElementType();
1109    mlir::Location loc = getLoc(e->getExprLoc());
      // Point at the real or imaginary half of the complex value in memory.
1110    Address component =
1111        e->getOpcode() == UO_Real
1112            ? builder.createComplexRealPtr(loc, lv.getAddress())
1113            : builder.createComplexImagPtr(loc, lv.getAddress());
1115    LValue elemLV = makeAddrLValue(component, elemTy);
1116    elemLV.getQuals().addQualifiers(lv.getQuals());
1117    return elemLV;
1118  }
1119  case UO_PreInc:
1120  case UO_PreDec: {
1121    LValue lv = emitLValue(e->getSubExpr());
1122
1123    assert(e->isPrefix() && "Prefix operator in unexpected state!");
1124
1125    if (e->getType()->isAnyComplexType())
1127    else
1129
1130    return lv;
1131  }
1132  case UO_Extension:
1133    llvm_unreachable("UnaryOperator extension should be handled above!");
1134  case UO_Plus:
1135  case UO_Minus:
1136  case UO_Not:
1137  case UO_LNot:
1138  case UO_AddrOf:
1139  case UO_PostInc:
1140  case UO_PostDec:
1141  case UO_Coawait:
1142    llvm_unreachable("UnaryOperator of non-lvalue kind!");
1143  }
1144  llvm_unreachable("Unknown unary operator kind!");
1145}
1146
1147/// If the specified expr is a simple decay from an array to pointer,
1148/// return the array subexpression.
1149/// FIXME: this could be abstracted into a common AST helper.
1150static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
1151 // If this isn't just an array->pointer decay, bail out.
1152 const auto *castExpr = dyn_cast<CastExpr>(e);
1153 if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
1154 return nullptr;
1155
1156 // If this is a decay from variable width array, bail out.
1157 const Expr *subExpr = castExpr->getSubExpr();
1158 if (subExpr->getType()->isVariableArrayType())
1159 return nullptr;
1160
1161 return subExpr;
1162}
1163
1164static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
1165 // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr?
1166 if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
1167 return constantOp.getValueAttr<cir::IntAttr>();
1168 return {};
1169}
1170
1171static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
1172 CharUnits eltSize) {
1173 // If we have a constant index, we can use the exact offset of the
1174 // element we're accessing.
1175 if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
1176 const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
1177 return arrayAlign.alignmentAtOffset(offset);
1178 }
1179 // Otherwise, use the worst-case alignment for any element.
1180 return arrayAlign.alignmentOfArrayElement(eltSize);
1181}
1182
// Strip nested VLA layers until a fixed-size element type is reached.
// NOTE(review): the first line of the signature is elided in this rendering.
1184                                       const VariableArrayType *vla) {
1185  QualType eltType;
1186  do {
1187    eltType = vla->getElementType();
    // Keep peeling as long as the element type is itself a VLA.
1188  } while ((vla = astContext.getAsVariableArrayType(eltType)));
1189  return eltType;
1190}
1191
// Compute the address of one array element via the CIR builder's
// getArrayElement helper. NOTE(review): the first line of the signature is
// elided in this rendering.
1193                                          mlir::Location beginLoc,
1194                                          mlir::Location endLoc, mlir::Value ptr,
1195                                          mlir::Type eltTy, mlir::Value idx,
1196                                          bool shouldDecay) {
1197  CIRGenModule &cgm = cgf.getCIRGenModule();
1198  // TODO(cir): LLVM codegen emits in-bounds gep check here, is there anything
1199  // that would enhance tracking this later in CIR?
1201  return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
1202                                          shouldDecay);
1203}
1204
// Address-returning overload: computes the element pointer and packages it
// with the best alignment that can be derived for the element.
// NOTE(review): the first line of the signature is elided in this rendering.
1206                                     mlir::Location beginLoc,
1207                                     mlir::Location endLoc, Address addr,
1208                                     QualType eltType, mlir::Value idx,
1209                                     mlir::Location loc, bool shouldDecay) {
1210
1211  // Determine the element size of the statically-sized base. This is
1212  // the thing that the indices are expressed in terms of.
1213  if (const VariableArrayType *vla =
1214          cgf.getContext().getAsVariableArrayType(eltType)) {
1215    eltType = getFixedSizeElementType(cgf.getContext(), vla);
1216  }
1217
1218  // We can use that to compute the best alignment of the element.
1219  const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
1220  const CharUnits eltAlign =
1221      getArrayElementAlign(addr.getAlignment(), idx, eltSize);
1222
1224  const mlir::Value eltPtr =
1225      emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
1226                            addr.getElementType(), idx, shouldDecay);
1227  const mlir::Type elementType = cgf.convertTypeForMem(eltType);
1228  return Address(eltPtr, elementType, eltAlign);
1229}
1230
1231LValue
// Lower an array-subscript expression to an l-value, handling vector bases,
// VLAs, simple array decays, and plain pointer bases in turn.
// NOTE(review): several original lines are elided in this rendering (e.g.
// parts of the ext-vector and VLA paths).
1233  if (e->getType()->getAs<ObjCObjectType>()) {
1234    cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
1236  }
1237
1238  // The index must always be an integer, which is not an aggregate. Emit it
1239  // in lexical order (this complexity is, sadly, required by C++17).
1240  assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
1241         "index was neither LHS nor RHS");
1242
1243  auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
1244    const mlir::Value idx = emitScalarExpr(e->getIdx());
1245
1246    // Extend or truncate the index type to 32 or 64-bits.
1247    auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
1248    if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
1249      cgm.errorNYI(e->getSourceRange(),
1250                   "emitArraySubscriptExpr: index type cast");
1251    return idx;
1252  };
1253
1254  // If the base is a vector type, then we are forming a vector element
1255  // with this subscript.
1256  if (e->getBase()->getType()->isSubscriptableVectorType() &&
1258    const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
1259    const LValue lv = emitLValue(e->getBase());
1260    return LValue::makeVectorElt(lv.getAddress(), idx, e->getBase()->getType(),
1261                                 lv.getBaseInfo());
1262  }
1263
1264  // The HLSL runtime handles subscript expressions on global resource arrays
1265  // and objects with HLSL buffer layouts.
1266  if (getLangOpts().HLSL) {
1267    cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: HLSL");
1268    return {};
1269  }
1270
1271  mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
1272
1273  // Handle the extvector case we ignored above.
1275    const LValue lv = emitLValue(e->getBase());
1276    Address addr = emitExtVectorElementLValue(lv, cgm.getLoc(e->getExprLoc()));
1277
1278    QualType elementType = lv.getType()->castAs<VectorType>()->getElementType();
1279    addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
1280                                 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
1281                                 idx, cgm.getLoc(e->getExprLoc()),
1282                                 /*shouldDecay=*/false);
1283
1284    return makeAddrLValue(addr, elementType, lv.getBaseInfo());
1285  }
1286
1287  if (const VariableArrayType *vla =
1288          getContext().getAsVariableArrayType(e->getType())) {
1289    // The base must be a pointer, which is not an aggregate. Emit
1290    // it. It needs to be emitted first in case it's what captures
1291    // the VLA bounds.
1293
1294    // The element count here is the total number of non-VLA elements.
1295    mlir::Value numElements = getVLASize(vla).numElts;
1296    idx = builder.createIntCast(idx, numElements.getType());
1297
1298    // Effectively, the multiply by the VLA size is part of the GEP.
1299    // GEP indexes are signed, and scaling an index isn't permitted to
1300    // signed-overflow, so we use the same semantics for our explicit
1301    // multiply. We suppress this if overflow is not undefined behavior.
1302    OverflowBehavior overflowBehavior = getLangOpts().PointerOverflowDefined
1305    idx = builder.createMul(cgm.getLoc(e->getExprLoc()), idx, numElements,
1306                            overflowBehavior);
1307
1308    addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
1309                                 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
1310                                 idx, cgm.getLoc(e->getExprLoc()),
1311                                 /*shouldDecay=*/false);
1312
1313    return makeAddrLValue(addr, vla->getElementType(), LValueBaseInfo());
1314  }
1315
1316  if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
1317    LValue arrayLV;
      // Subscripts of subscripts recurse so alignment info flows through.
1318    if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
1319      arrayLV = emitArraySubscriptExpr(ase);
1320    else
1321      arrayLV = emitLValue(array);
1322
1323    // Propagate the alignment from the array itself to the result.
1324    const Address addr = emitArraySubscriptPtr(
1325        *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
1326        arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1327        /*shouldDecay=*/true);
1328
1329    const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
1330
1331    if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1332      cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1333    }
1334
1335    return lv;
1336  }
1337
1338  // The base must be a pointer; emit it with an estimate of its alignment.
1339  assert(e->getBase()->getType()->isPointerType() &&
1340         "The base must be a pointer");
1341
1342  LValueBaseInfo eltBaseInfo;
1343  const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
1344  // Propagate the alignment from the array itself to the result.
    // NOTE(review): local name `addxr` looks like a typo for `addr` — purely
    // cosmetic, it is used consistently below.
1345  const Address addxr = emitArraySubscriptPtr(
1346      *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
1347      e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1348      /*shouldDecay=*/false);
1349
1350  const LValue lv = LValue::makeAddr(addxr, e->getType(), eltBaseInfo);
1351
1352  if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1353    cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1354  }
1355
1356  return lv;
1357}
1358
// Lower an ext-vector element access (e.g. v.xy) to an l-value.
// NOTE(review): the signature and a few interior lines are elided in this
// rendering.
1360  // Emit the base vector as an l-value.
1361  LValue base;
1362
1363  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
1364  if (e->isArrow()) {
1365    // If it is a pointer to a vector, emit the address and form an lvalue with
1366    // it.
1367    LValueBaseInfo baseInfo;
1368    Address ptr = emitPointerWithAlignment(e->getBase(), &baseInfo);
1369    const auto *clangPtrTy =
1371    base = makeAddrLValue(ptr, clangPtrTy->getPointeeType(), baseInfo);
1372    base.getQuals().removeObjCGCAttr();
1373  } else if (e->getBase()->isGLValue()) {
1374    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
1375    // emit the base as an lvalue.
1376    assert(e->getBase()->getType()->isVectorType());
1377    base = emitLValue(e->getBase());
1378  } else {
1379    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
1380    assert(e->getBase()->getType()->isVectorType() &&
1381           "Result must be a vector");
1382    mlir::Value vec = emitScalarExpr(e->getBase());
1383
1384    // Store the vector to memory (because LValue wants an address).
1385    QualType baseTy = e->getBase()->getType();
1386    Address vecMem = createMemTemp(baseTy, vec.getLoc(), "tmp");
1387    if (!getLangOpts().HLSL && baseTy->isExtVectorBoolType()) {
1388      cgm.errorNYI(e->getSourceRange(),
1389                   "emitExtVectorElementExpr: ExtVectorBoolType & !HLSL");
1390      return {};
1391    }
1392    builder.createStore(vec.getLoc(), vec, vecMem);
1393    base = makeAddrLValue(vecMem, baseTy, AlignmentSource::Decl);
1394  }
1395
1396  QualType type =
1398
1399  // Encode the element access list into a vector of unsigned indices.
1401  e->getEncodedElementAccess(indices);
1402
1403  if (base.isSimple()) {
      // Store the shuffle indices as an I64 array attribute on the l-value.
1404    SmallVector<int64_t> attrElts(indices.begin(), indices.end());
1405    mlir::ArrayAttr elts = builder.getI64ArrayAttr(attrElts);
1406    return LValue::makeExtVectorElt(base.getAddress(), elts, type,
1407                                    base.getBaseInfo());
1408  }
1409
1410  cgm.errorNYI(e->getSourceRange(),
1411               "emitExtVectorElementExpr: isSimple is false");
1412  return {};
1413}
1414
// Build an l-value for a string literal: fetch (or create) its global,
// take its address, and wrap it with the global's alignment.
// NOTE(review): the first signature line and the final argument line of the
// return are elided in this rendering.
1416                                               llvm::StringRef name) {
1417  cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e, name);
1418  assert(globalOp.getAlignment() && "expected alignment for string literal");
1419  unsigned align = *(globalOp.getAlignment());
1420  mlir::Value addr =
1421      builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
1422  return makeAddrLValue(
1423      Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
1425}
1426
1427/// Casts are never lvalues unless that cast is to a reference type. If the cast
1428/// is to a reference, we can have the usual lvalue result, otherwise if a cast
1429/// is needed by the code generator in an lvalue context, then it must mean that
1430/// we need the address of an aggregate in order to access one of its members.
1431/// This can happen for all the reasons that casts are permitted with aggregate
1432/// result, including noop aggregate casts, and cast from scalar to union.
// Dispatch on the cast kind; only the reference-producing / aggregate-address
// cases are handled, everything else is unreachable or NYI.
// NOTE(review): some original lines are elided in this rendering (e.g. the
// CK_Dynamic return, parts of the address-space and bitcast returns).
1434  switch (e->getCastKind()) {
    // These cast kinds never produce l-values in CIRGen.
1435  case CK_ToVoid:
1436  case CK_BitCast:
1437  case CK_LValueToRValueBitCast:
1438  case CK_ArrayToPointerDecay:
1439  case CK_FunctionToPointerDecay:
1440  case CK_NullToMemberPointer:
1441  case CK_NullToPointer:
1442  case CK_IntegralToPointer:
1443  case CK_PointerToIntegral:
1444  case CK_PointerToBoolean:
1445  case CK_IntegralCast:
1446  case CK_BooleanToSignedIntegral:
1447  case CK_IntegralToBoolean:
1448  case CK_IntegralToFloating:
1449  case CK_FloatingToIntegral:
1450  case CK_FloatingToBoolean:
1451  case CK_FloatingCast:
1452  case CK_FloatingRealToComplex:
1453  case CK_FloatingComplexToReal:
1454  case CK_FloatingComplexToBoolean:
1455  case CK_FloatingComplexCast:
1456  case CK_FloatingComplexToIntegralComplex:
1457  case CK_IntegralRealToComplex:
1458  case CK_IntegralComplexToReal:
1459  case CK_IntegralComplexToBoolean:
1460  case CK_IntegralComplexCast:
1461  case CK_IntegralComplexToFloatingComplex:
1462  case CK_DerivedToBaseMemberPointer:
1463  case CK_BaseToDerivedMemberPointer:
1464  case CK_MemberPointerToBoolean:
1465  case CK_ReinterpretMemberPointer:
1466  case CK_AnyPointerToBlockPointerCast:
1467  case CK_ARCProduceObject:
1468  case CK_ARCConsumeObject:
1469  case CK_ARCReclaimReturnedObject:
1470  case CK_ARCExtendBlockObject:
1471  case CK_CopyAndAutoreleaseBlockObject:
1472  case CK_IntToOCLSampler:
1473  case CK_FloatingToFixedPoint:
1474  case CK_FixedPointToFloating:
1475  case CK_FixedPointCast:
1476  case CK_FixedPointToBoolean:
1477  case CK_FixedPointToIntegral:
1478  case CK_IntegralToFixedPoint:
1479  case CK_MatrixCast:
1480  case CK_HLSLVectorTruncation:
1481  case CK_HLSLMatrixTruncation:
1482  case CK_HLSLArrayRValue:
1483  case CK_HLSLElementwiseCast:
1484  case CK_HLSLAggregateSplatCast:
1485    llvm_unreachable("unexpected cast lvalue");
1486
1487  case CK_Dependent:
1488    llvm_unreachable("dependent cast kind in IR gen!");
1489
1490  case CK_BuiltinFnToFnPtr:
1491    llvm_unreachable("builtin functions are handled elsewhere");
1492
1493  case CK_Dynamic: {
1494    LValue lv = emitLValue(e->getSubExpr());
1495    Address v = lv.getAddress();
1496    const auto *dce = cast<CXXDynamicCastExpr>(e);
1498  }
1499
1500  // These are never l-values; just use the aggregate emission code.
1501  case CK_ToUnion:
1502    return emitAggExprToLValue(e);
1503
1504  case CK_NonAtomicToAtomic:
1505  case CK_AtomicToNonAtomic:
1506  case CK_ObjCObjectLValueCast:
1507  case CK_VectorSplat:
1508  case CK_ConstructorConversion:
1509  case CK_UserDefinedConversion:
1510  case CK_CPointerToObjCPointerCast:
1511  case CK_BlockPointerToObjCPointerCast:
1512  case CK_LValueToRValue: {
1513    cgm.errorNYI(e->getSourceRange(),
1514                 std::string("emitCastLValue for unhandled cast kind: ") +
1515                     e->getCastKindName());
1516
1517    return {};
1518  }
1519
1520  case CK_AddressSpaceConversion: {
1521    LValue lv = emitLValue(e->getSubExpr());
1522    QualType destTy = getContext().getPointerType(e->getType());
1523
1524    clang::LangAS srcLangAS = e->getSubExpr()->getType().getAddressSpace();
1525    mlir::ptr::MemorySpaceAttrInterface srcAS;
1526    if (clang::isTargetAddressSpace(srcLangAS))
1527      srcAS = cir::toCIRAddressSpaceAttr(getMLIRContext(), srcLangAS);
1528    else
1529      cgm.errorNYI(
1530          e->getSourceRange(),
1531          "emitCastLValue: address space conversion from unknown address "
1532          "space");
1533
1534    mlir::Value v = performAddrSpaceCast(lv.getPointer(), convertType(destTy));
1535
1537                             lv.getAddress().getAlignment()),
1538                     e->getType(), lv.getBaseInfo());
1539  }
1540
1541  case CK_LValueBitCast: {
1542    // This must be a reinterpret_cast (or c-style equivalent).
1543    const auto *ce = cast<ExplicitCastExpr>(e);
1544
1545    cgm.emitExplicitCastExprType(ce, this);
1546    LValue LV = emitLValue(e->getSubExpr());
1548        builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
1549
1550    return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
1551  }
1552
1553  case CK_NoOp: {
1554    // CK_NoOp can model a qualification conversion, which can remove an array
1555    // bound and change the IR type.
1556    LValue lv = emitLValue(e->getSubExpr());
1557    // Propagate the volatile qualifier to LValue, if exists in e.
1559      lv.getQuals() = e->getType().getQualifiers();
1560    if (lv.isSimple()) {
1561      Address v = lv.getAddress();
1562      if (v.isValid()) {
1563        mlir::Type ty = convertTypeForMem(e->getType());
1564        if (v.getElementType() != ty) {
1565          // We have only inspected/reproduced this with complete to incomplete
1566          // array types, so we do an NYI for other cases, so we can make sure
1567          // we're doing a conversion we want to be making.
1568          auto fromTy = dyn_cast<cir::ArrayType>(v.getElementType());
1569          auto toTy = dyn_cast<cir::ArrayType>(ty);
1570          if (!fromTy || !toTy ||
1571              fromTy.getElementType() != toTy.getElementType() ||
1572              toTy.getSize() != 0)
1573            cgm.errorNYI(e->getSourceRange(),
1574                         "emitCastLValue NoOp not array-shrink case");
1575
1576          lv = makeAddrLValue(v.withElementType(builder, ty), e->getType(),
1577                              lv.getBaseInfo());
1578        }
1579      }
1580    }
1581    return lv;
1582  }
1583
1584  case CK_UncheckedDerivedToBase:
1585  case CK_DerivedToBase: {
1586    auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();
1587
1588    LValue lv = emitLValue(e->getSubExpr());
1589    Address thisAddr = lv.getAddress();
1590
1591    // Perform the derived-to-base conversion
1592    Address baseAddr =
1593        getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
1594                              /*NullCheckValue=*/false, e->getExprLoc());
1595
1596    // TODO: Support accesses to members of base classes in TBAA. For now, we
1597    // conservatively pretend that the complete object is of the base class
1598    // type.
1600    return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
1601  }
1602
1603  case CK_BaseToDerived: {
1604    const auto *derivedClassDecl = e->getType()->castAsCXXRecordDecl();
1605    LValue lv = emitLValue(e->getSubExpr());
1606
1607    // Perform the base-to-derived conversion
1609        getLoc(e->getSourceRange()), lv.getAddress(), derivedClassDecl,
1610        e->path(), /*NullCheckValue=*/false);
1611    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
1612    // performed and the object is not of the derived type.
1614
1616    return makeAddrLValue(derived, e->getType(), lv.getBaseInfo());
1617  }
1618
1619  case CK_ZeroToOCLOpaqueType:
1620    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
1621  }
1622
1623  llvm_unreachable("Invalid cast kind");
1624}
1625
// If the member expression refers to a static data member (a VarDecl),
// rewrite it as a DeclRefExpr so the regular decl-ref path handles it.
// NOTE(review): the first signature line and one Create() argument line are
// elided in this rendering.
1627                                               const MemberExpr *me) {
1628  if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
1629    // Try to emit static variable member expressions as DREs.
1630    return DeclRefExpr::Create(
1632        /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
1633        me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
1634  }
    // Not a static data member: caller falls back to normal member emission.
1635  return nullptr;
1636}
1637
// Lower a member expression (s.x / s->x) to an l-value.
// NOTE(review): the signature and a few interior lines are elided in this
// rendering.
1639  if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
1641    return emitDeclRefLValue(dre);
1642  }
1643
1644  Expr *baseExpr = e->getBase();
1645  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
1646  LValue baseLV;
1647  if (e->isArrow()) {
1648    LValueBaseInfo baseInfo;
1650    Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
1651    QualType ptrTy = baseExpr->getType()->getPointeeType();
1653    baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
1654  } else {
1656    baseLV = emitLValue(baseExpr);
1657  }
1658
1659  const NamedDecl *nd = e->getMemberDecl();
1660  if (auto *field = dyn_cast<FieldDecl>(nd)) {
1661    LValue lv = emitLValueForField(baseLV, field);
1663    if (getLangOpts().OpenMP) {
1664      // If the member was explicitly marked as nontemporal, mark it as
1665      // nontemporal. If the base lvalue is marked as nontemporal, mark access
1666      // to children as nontemporal too.
1667      cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
1668    }
1669    return lv;
1670  }
1671
1672  if (const auto *fd = dyn_cast<FunctionDecl>(nd))
1673    return emitFunctionDeclLValue(*this, e, fd);
1674
1675  llvm_unreachable("Unhandled member declaration!");
1676}
1677
1678/// Evaluate an expression into a given memory location.
// NOTE(review): the first signature line and parts of the aggregate/scalar
// cases are elided in this rendering.
1680                                     Qualifiers quals, bool isInit) {
1681  // FIXME: This function should take an LValue as an argument.
    // Dispatch on the CIR evaluation kind of the expression's type.
1682  switch (getEvaluationKind(e->getType())) {
1683  case cir::TEK_Complex: {
1684    LValue lv = makeAddrLValue(location, e->getType());
1685    emitComplexExprIntoLValue(e, lv, isInit);
1686    return;
1687  }
1688
1689  case cir::TEK_Aggregate: {
1690    emitAggExpr(e, AggValueSlot::forAddr(location, quals,
1694    return;
1695  }
1696
1697  case cir::TEK_Scalar: {
1699    LValue lv = makeAddrLValue(location, e->getType());
1700    emitStoreThroughLValue(rv, lv);
1701    return;
1702  }
1703  }
1704
1705  llvm_unreachable("bad evaluation kind");
1706}
1707
// Create the storage for a materialized temporary, honoring its storage
// duration: an alloca for full-expression/automatic temporaries, a global
// for static/thread ones. NOTE(review): the first signature line and a few
// interior lines are elided in this rendering.
1709                                        const MaterializeTemporaryExpr *m,
1710                                        const Expr *inner) {
1711  // TODO(cir): cgf.getTargetHooks();
1712  switch (m->getStorageDuration()) {
1713  case SD_FullExpression:
1714  case SD_Automatic: {
1715    QualType ty = inner->getType();
1716
1718
1719    // The temporary memory should be created in the same scope as the extending
1720    // declaration of the temporary materialization expression.
1721    cir::AllocaOp extDeclAlloca;
1722    if (const ValueDecl *extDecl = m->getExtendingDecl()) {
1723      auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
1724      if (extDeclAddrIter != cgf.localDeclMap.end())
1725        extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
1726    }
1727    mlir::OpBuilder::InsertPoint ip;
1728    if (extDeclAlloca) {
        // Co-locate the temporary with the extending declaration's alloca.
1729      ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
1730    } else if (cgf.isInConditionalBranch() &&
1732      // Place in the function entry block so the alloca dominates both
1733      // regions of any enclosing cir.cleanup.scope. The default path
1734      // would use curLexScope which may be a ternary branch.
1737    }
1738    return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
1739                             cgf.getCounterRefTmpAsString(), /*alloca=*/nullptr,
1740                             ip);
1741  }
1742  case SD_Thread:
1743  case SD_Static: {
1744    auto addr =
1745        mlir::cast<cir::GlobalOp>(cgf.cgm.getAddrOfGlobalTemporary(m, inner));
1746    auto getGlobal = cgf.cgm.getBuilder().createGetGlobal(addr);
1747    assert(addr.getAlignment().has_value() &&
1748           "This should always have an alignment");
1749    return Address(getGlobal,
1750                   clang::CharUnits::fromQuantity(addr.getAlignment().value()));
1751  }
1752
1753  case SD_Dynamic:
1754    llvm_unreachable("temporary can't have dynamic storage duration");
1755  }
1756  llvm_unreachable("unknown storage duration");
1757}
1758
// Register the cleanup (destructor call) for a materialized temporary whose
// type needs destruction, according to its storage duration.
// NOTE(review): the first signature line and several interior lines are
// elided in this rendering.
1760                                 const MaterializeTemporaryExpr *m,
1761                                 const Expr *e, Address referenceTemporary) {
1762  // Objective-C++ ARC:
1763  //   If we are binding a reference to a temporary that has ownership, we
1764  //   need to perform retain/release operations on the temporary.
1765  //
1766  // FIXME(ogcg): This should be looking at e, not m.
1767  if (m->getType().getObjCLifetime()) {
1768    cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
1769    return;
1770  }
1771
    // No destruction needed: nothing to push.
1773  if (dk == QualType::DK_none)
1774    return;
1775
1776  switch (m->getStorageDuration()) {
1777  case SD_Static:
1778  case SD_Thread: {
1779    CXXDestructorDecl *referenceTemporaryDtor = nullptr;
1780    if (const auto *classDecl =
1782        classDecl && !classDecl->hasTrivialDestructor())
1783      // Get the destructor for the reference temporary.
1784      referenceTemporaryDtor = classDecl->getDestructor();
1785
1786    if (!referenceTemporaryDtor)
1787      return;
1788
1789    cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
1790                                          "storage duration with destructors");
1791    break;
1792  }
1793
1794  case SD_FullExpression:
1795    cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
1797    break;
1798
1799  case SD_Automatic:
1801        NormalAndEHCleanup, referenceTemporary, e->getType(),
1803    break;
1804
1805  case SD_Dynamic:
1806    llvm_unreachable("temporary cannot have dynamic storage duration");
1807  }
1808}
1809
// Materialize a temporary and bind it as an l-value: create storage,
// evaluate the initializer into it, and register its cleanup.
// NOTE(review): the first signature line and a few interior lines are elided
// in this rendering.
1811    const MaterializeTemporaryExpr *m) {
1812  const Expr *e = m->getSubExpr();
1813
1814  assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
1815          !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
1816         "Reference should never be pseudo-strong!");
1817
1818  // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so
1819  // as that will cause the lifetime adjustment to be lost for ARC
1820  auto ownership = m->getType().getObjCLifetime();
1821  if (ownership != Qualifiers::OCL_None &&
1822      ownership != Qualifiers::OCL_ExplicitNone) {
1823    cgm.errorNYI(e->getSourceRange(),
1824                 "emitMaterializeTemporaryExpr: ObjCLifetime");
1825    return {};
1826  }
1827
    // Peel comma LHSs and subobject adjustments off the initializer.
1830  e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);
1831
1832  for (const Expr *ignored : commaLHSs)
1833    emitIgnoredExpr(ignored);
1834
1835  if (isa<OpaqueValueExpr>(e)) {
1836    cgm.errorNYI(e->getSourceRange(),
1837                 "emitMaterializeTemporaryExpr: OpaqueValueExpr");
1838    return {};
1839  }
1840
1841  // Create and initialize the reference temporary.
1842  Address object = createReferenceTemporary(*this, m, e);
1843
1844  if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
1845    // TODO(cir): add something akin to stripPointerCasts() to ptr above
1846    cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
1847    return {};
1848  } else {
1850    emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
1851  }
1852  pushTemporaryCleanup(*this, m, e, object);
1853
1854  // Perform derived-to-base casts and/or field accesses, to get from the
1855  // temporary object we created (and, potentially, for which we extended
1856  // the lifetime) to the subobject we're binding the reference to.
1857  if (!adjustments.empty()) {
1858    cgm.errorNYI(e->getSourceRange(),
1859                 "emitMaterializeTemporaryExpr: Adjustments");
1860    return {};
1861  }
1862
1863  return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
1864}
1865
1866LValue
// Look up the cached l-value for an OpaqueValueExpr; a unique OVE that has
// no cached mapping is emitted from its source expression instead.
// NOTE(review): the signature/assert lines are elided in this rendering.
1869
1870  auto it = opaqueLValues.find(e);
1871  if (it != opaqueLValues.end())
1872    return it->second;
1873
1874  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
1875  return emitLValue(e->getSourceExpr());
1876}
1877
1878RValue
// R-value counterpart of the mapping above: return the cached r-value for
// an OpaqueValueExpr, or emit a unique OVE from its source expression.
// NOTE(review): the signature/assert lines are elided in this rendering.
1881
1882  auto it = opaqueRValues.find(e);
1883  if (it != opaqueRValues.end())
1884    return it->second;
1885
1886  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
1887  return emitAnyExpr(e->getSourceExpr());
1888}
1889
// Lower a block-scope compound literal: allocate a temporary, evaluate the
// initializer into it, and return it as an l-value. File-scope literals and
// C destructed types are NYI. NOTE(review): the signature line is elided in
// this rendering.
1891  if (e->isFileScope()) {
1892    cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
1893    return {};
1894  }
1895
1896  if (e->getType()->isVariablyModifiedType())
1898
1899  Address declPtr = createMemTemp(e->getType(), getLoc(e->getSourceRange()),
1900                                  ".compoundliteral");
1901  const Expr *initExpr = e->getInitializer();
1902  LValue result = makeAddrLValue(declPtr, e->getType(), AlignmentSource::Decl);
1903
1904  emitAnyExprToMem(initExpr, declPtr, e->getType().getQualifiers(),
1905                   /*Init*/ true);
1906
1907  // Block-scope compound literals are destroyed at the end of the enclosing
1908  // scope in C.
1909  if (!getLangOpts().CPlusPlus && e->getType().isDestructedType()) {
1910    cgm.errorNYI(e->getSourceRange(),
1911                 "emitCompoundLiteralLValue: non C++ DestructedType");
1912    return {};
1913  }
1914
1915  return result;
1916}
1917
// Emit a call expression and present its result as an l-value. Aggregate
// results already live in memory; a scalar result is only valid here when
// the call returns a reference.
1919  RValue rv = emitCallExpr(e);
1920
1921  if (!rv.isScalar())
1922    return makeAddrLValue(rv.getAggregateAddress(), e->getType(),
1924
1925  assert(e->getCallReturnType(getContext())->isReferenceType() &&
1926         "Can't have a scalar return unless the return type is a "
1927         "reference type!");
1928
1930}
1931
// Emit a binary operator used in l-value position: comma, pointer-to-member,
// or (per the assert below) assignment, dispatched on the evaluation kind of
// the assigned type.
1933  // Comma expressions just emit their LHS then their RHS as an l-value.
1934  if (e->getOpcode() == BO_Comma) {
1935    emitIgnoredExpr(e->getLHS());
1936    return emitLValue(e->getRHS());
1937  }
1938
1939  if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI)
1941
1942  assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");
1943
1944  // Note that in all of these cases, __block variables need the RHS
1945  // evaluated first just in case the variable gets moved by the RHS.
1946
1948  case cir::TEK_Scalar: {
1950    if (e->getLHS()->getType().getObjCLifetime() !=
1952      cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
1953      return {};
1954    }
1955
// RHS is evaluated before the LHS l-value, matching the comment above.
1956    RValue rv = emitAnyExpr(e->getRHS());
1957    LValue lv = emitLValue(e->getLHS());
1958
1959    SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
1960    if (lv.isBitField())
1962    else
1963      emitStoreThroughLValue(rv, lv);
1964
1965    if (getLangOpts().OpenMP) {
1966      cgm.errorNYI(e->getSourceRange(), "openmp");
1967      return {};
1968    }
1969
1970    return lv;
1971  }
1972
1973  case cir::TEK_Complex: {
1975  }
1976
1977  case cir::TEK_Aggregate:
1978    cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
1979    return {};
1980  }
1981  llvm_unreachable("bad evaluation kind");
1982}
1983
1984/// Emit code to compute the specified expression which
1985/// can have any type. The result is returned as an RValue struct.
1987                                    bool ignoreResult) {
1989  case cir::TEK_Scalar:
1990    return RValue::get(emitScalarExpr(e, ignoreResult));
1991  case cir::TEK_Complex:
1993  case cir::TEK_Aggregate: {
    // Aggregates need a destination slot: materialize a temporary when the
    // caller didn't provide one and the result is actually used.
1994    if (!ignoreResult && aggSlot.isIgnored())
1995      aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
1997    emitAggExpr(e, aggSlot);
1998    return aggSlot.asRValue();
1999  }
2000  }
2001  llvm_unreachable("bad evaluation kind");
2002}
2003
2004// Detect the unusual situation where an inline version is shadowed by a
2005// non-inline version. In that case we should pick the external one
2006// everywhere. That's GCC behavior too.
// Returns true only if every redeclaration in the chain is an inline
// builtin declaration.
2008  for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
2009    if (!pd->isInlineBuiltinDeclaration())
2010      return false;
2011  return true;
2012}
2013
/// Resolve a direct callee for \p gd. Handles builtin functions (including
/// inline-builtin shadowing, where the builtin is called through a ".inline"
/// mangled clone) and host-side CUDA/HIP kernel-handle indirection.
2014CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
2015  const auto *fd = cast<FunctionDecl>(gd.getDecl());
2016
2017  if (unsigned builtinID = fd->getBuiltinID()) {
2018    StringRef ident = cgm.getMangledName(gd);
2019    std::string fdInlineName = (ident + ".inline").str();
2020
2021    bool isPredefinedLibFunction =
2022        cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
2023    // TODO: Read no-builtin function attribute and set this accordingly.
2024    // Using false here matches OGCG's default behavior - builtins are called
2025    // as builtins unless explicitly disabled. The previous value of true was
2026    // overly conservative and caused functions to be marked as no_inline when
2027    // they shouldn't be.
2028    bool hasAttributeNoBuiltin = false;
2030
2031    // When directly calling an inline builtin, call it through its mangled
2032    // name to make it clear it's not the actual builtin.
2033    auto fn = cast<cir::FuncOp>(curFn);
2034    if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
2035      cir::FuncOp clone =
2036          mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
2037
2038      if (!clone) {
2039        // Create a forward declaration - the body will be generated in
2040        // generateCode when the function definition is processed
2041        cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
2042        mlir::OpBuilder::InsertionGuard guard(builder);
2043        builder.setInsertionPointToStart(cgm.getModule().getBody());
2044
2045        clone = cir::FuncOp::create(builder, calleeFunc.getLoc(), fdInlineName,
2046                                    calleeFunc.getFunctionType());
2047        clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
2048            &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
2049        clone.setSymVisibility("private");
2050        clone.setInlineKind(cir::InlineKind::AlwaysInline);
2051      }
2052      return CIRGenCallee::forDirect(clone, gd);
2053    }
2054
2055    // Replaceable builtins provide their own implementation of a builtin. If we
2056    // are in an inline builtin implementation, avoid trivial infinite
2057    // recursion. Honor __attribute__((no_builtin("foo"))) or
2058    // __attribute__((no_builtin)) on the current function unless foo is
2059    // not a predefined library function which means we must generate the
2060    // builtin no matter what.
2061    else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
2062      return CIRGenCallee::forBuiltin(builtinID, fd);
2063  }
2064
2065  cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);
2066
// On the CUDA/HIP host side, a __global__ kernel is called through its stub,
// reached via the runtime's kernel handle.
2067  if ((cgm.getLangOpts().CUDA || cgm.getLangOpts().HIP) &&
2068      !cgm.getLangOpts().CUDAIsDevice && fd->hasAttr<CUDAGlobalAttr>()) {
2069    mlir::Operation *handle = cgm.getCUDARuntime().getKernelHandle(callee, gd);
2070    callee =
2071        mlir::cast<cir::FuncOp>(*cgm.getCUDARuntime().getKernelStub(handle));
2072  }
2073
2074  return CIRGenCallee::forDirect(callee, gd);
2075}
2076
// Produce a placeholder RValue for a value that will never be inspected.
// Only void is currently handled; other types diagnose NYI and still return
// a null scalar so emission can continue.
2078  if (ty->isVoidType())
2079    return RValue::get(nullptr);
2080
2081  cgm.errorNYI("unsupported type for undef rvalue");
2082  return RValue::get(nullptr);
2083}
2084
// Emit a call through \p origCallee for call expression \p e. Arranges the
// argument list, computes the CIRGenFunctionInfo, special-cases calls through
// unprototyped function types, and delegates to the lower-level emitCall.
2086                                const CIRGenCallee &origCallee,
2087                                const clang::CallExpr *e,
2089  // Get the actual function type. The callee type will always be a pointer to
2090  // function type or a block pointer type.
2091  assert(calleeTy->isFunctionPointerType() &&
2092         "Callee must have function pointer type!");
2093
2094  calleeTy = getContext().getCanonicalType(calleeTy);
2095  auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
2096
2097  CIRGenCallee callee = origCallee;
2098
2099  if (getLangOpts().CPlusPlus)
2101
2102  const auto *fnType = cast<FunctionType>(pointeeTy);
2103
2105
2106  CallArgList args;
2108
2109  emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
2110               e->getDirectCallee());
2111
2112  const CIRGenFunctionInfo &funcInfo =
2113      cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
2114
2115  // C99 6.5.2.2p6:
2116  //   If the expression that denotes the called function has a type that does
2117  //   not include a prototype, [the default argument promotions are performed].
2118  //   If the number of arguments does not equal the number of parameters, the
2119  //   behavior is undefined. If the function is defined with a type that
2120  //   includes a prototype, and either the prototype ends with an ellipsis (,
2121  //   ...) or the types of the arguments after promotion are not compatible
2122  //   with the types of the parameters, the behavior is undefined. If the
2123  //   function is defined with a type that does not include a prototype, and
2124  //   the types of the arguments after promotion are not compatible with those
2125  //   of the parameters after promotion, the behavior is undefined [except in
2126  //   some trivial cases].
2127  // That is, in the general case, we should assume that a call through an
2128  // unprototyped function type works like a *non-variadic* call. The way we
2129  // make this work is to cast to the exact type of the promoted arguments.
2130  if (isa<FunctionNoProtoType>(fnType)) {
2133    cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
2134    // get non-variadic function type
2135    calleeTy = cir::FuncType::get(calleeTy.getInputs(),
2136                                  calleeTy.getReturnType(), false);
2137    auto calleePtrTy = cir::PointerType::get(calleeTy);
2138
2139    mlir::Operation *fn = callee.getFunctionPointer();
2140    mlir::Value addr;
2141    if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
2142      addr = cir::GetGlobalOp::create(
2143          builder, getLoc(e->getSourceRange()),
2144          cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
2145    } else {
2146      addr = fn->getResult(0);
2147    }
2148
// Re-point the callee at the bitcast so the call uses the promoted,
// non-variadic signature.
2149    fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
2150    callee.setFunctionPointer(fn);
2151  }
2152
2154  assert(!cir::MissingFeatures::hip());
2156
2157  cir::CIRCallOpInterface callOp;
2158  RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
2159                               getLoc(e->getExprLoc()));
2160
2162
2163  return callResult;
2164}
2165
// Resolve the callee of a call expression into a CIRGenCallee: direct calls
// are resolved through emitDirectCallee; anything else becomes an indirect
// reference through a function pointer value.
2167  e = e->IgnoreParens();
2168
2169  // Look through function-to-pointer decay.
2170  if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
2171    if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
2172        implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
2173      return emitCallee(implicitCast->getSubExpr());
2174    }
2175    // When performing an indirect call through a function pointer lvalue, the
2176    // function pointer lvalue is implicitly converted to an rvalue through an
2177    // lvalue-to-rvalue conversion.
2178    assert(implicitCast->getCastKind() == CK_LValueToRValue &&
2179           "unexpected implicit cast on function pointers");
2180  } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
2181    // Resolve direct calls.
2182    const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
2183    return emitDirectCallee(funcDecl);
2184  } else if (auto me = dyn_cast<MemberExpr>(e)) {
2185    if (const auto *fd = dyn_cast<FunctionDecl>(me->getMemberDecl())) {
      // The base is evaluated only for side effects; the call itself is
      // direct.
2186      emitIgnoredExpr(me->getBase());
2187      return emitDirectCallee(fd);
2188    }
2189    // Else fall through to the indirect reference handling below.
2190  } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
2192  }
2193
2194  // Otherwise, we have an indirect reference.
2195  mlir::Value calleePtr;
2197  if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
2198    calleePtr = emitScalarExpr(e);
2199    functionType = ptrType->getPointeeType();
2200  } else {
2201    functionType = e->getType();
2202    calleePtr = emitLValue(e).getPointer();
2203  }
2204  assert(functionType->isFunctionType());
2205
2206  GlobalDecl gd;
2207  if (const auto *vd =
2208          dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
2209    gd = GlobalDecl(vd);
2210
2211  CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
2212  CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
2213  return callee;
2214}
2215
// Emit any call expression, dispatching first on the expression's dynamic
// kind (member call, CUDA kernel call, operator call) and then on the
// resolved callee kind (builtin, pseudo-destructor, plain call).
2219
2220  if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
2222
2223  if (const auto *cudaKernelCallExpr = dyn_cast<CUDAKernelCallExpr>(e))
2225
2226  if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
2227    // If the callee decl is a CXXMethodDecl, we need to emit this as a C++
2228    // operator member call.
2229    if (const CXXMethodDecl *md =
2230            dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
2231      return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
2232    // A CXXOperatorCallExpr is created even for explicit object methods, but
2233    // these should be treated like static function calls. Fall through to do
2234    // that.
2235  }
2236
2237  CIRGenCallee callee = emitCallee(e->getCallee());
2238
2239  if (callee.isBuiltin())
2240    return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
2241                           returnValue);
2242
2243  if (callee.isPseudoDestructor())
2245
2246  return emitCall(e->getCallee()->getType(), callee, e, returnValue);
2247}
2248
2249/// Emit code to compute the specified expression, ignoring the result.
// A prvalue is still evaluated for its side effects; aggregates go into an
// ignored slot so no temporary is materialized for the unused result.
2251  if (e->isPRValue()) {
2252    emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
2253    return;
2254  }
2255
2256  // Just emit it as an l-value and drop the result.
2257  emitLValue(e);
2258}
2259
// Emit an array-to-pointer decay: evaluate the array expression as an
// l-value and produce the address of its first element. VLAs are returned
// as-is; fixed-size arrays are checked against the converted AST type.
2261                                                LValueBaseInfo *baseInfo) {
2263  assert(e->getType()->isArrayType() &&
2264         "Array to pointer decay must have array source type!");
2265
2266  // Expressions of array type can't be bitfields or vector elements.
2267  LValue lv = emitLValue(e);
2268  Address addr = lv.getAddress();
2269
2270  // If the array type was an incomplete type, we need to make sure
2271  // the decay ends up being the right type.
2272  auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
2273
2274  if (e->getType()->isVariableArrayType())
2275    return addr;
2276
2277  [[maybe_unused]] auto pointeeTy =
2278      mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
2279
2280  [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
2281  assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
2282  assert(pointeeTy == arrayTy);
2283
2284  // The result of this decay conversion points to an array element within the
2285  // base lvalue. However, since TBAA currently does not support representing
2286  // accesses to elements of member arrays, we conservatively represent accesses
2287  // to the pointee object as if it had no base lvalue specified.
2288  // TODO: Support TBAA for member arrays.
2291
2292  mlir::Value ptr = builder.maybeBuildArrayDecay(
2293      cgm.getLoc(e->getSourceRange()), addr.getPointer(),
2294      convertTypeForMem(eltType));
2295  return Address(ptr, addr.getAlignment());
2296}
2297
2298/// Given the address of a temporary variable, produce an r-value of its type.
// Loads according to the evaluation kind: complex and scalar values are
// loaded from memory; aggregates are returned by address.
2302  switch (getEvaluationKind(type)) {
2303  case cir::TEK_Complex:
2304    return RValue::getComplex(emitLoadOfComplex(lvalue, loc));
2305  case cir::TEK_Aggregate:
2306    return lvalue.asAggregateRValue();
2307  case cir::TEK_Scalar:
2308    return RValue::get(emitLoadOfScalar(lvalue, loc));
2309  }
2310  llvm_unreachable("bad evaluation kind");
2311}
2312
2313/// Emit an `if` on a boolean condition, filling `then` and `else` into
2314/// appropriated regions.
// Each branch statement is emitted inside its own LexicalScope; the overall
// result succeeds only if both branches succeed.
2315mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
2316                                                     const Stmt *thenS,
2317                                                     const Stmt *elseS) {
2318  mlir::Location thenLoc = getLoc(thenS->getSourceRange());
2319  std::optional<mlir::Location> elseLoc;
2320  if (elseS)
2321    elseLoc = getLoc(elseS->getSourceRange());
2322
2323  mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
2325      cond, /*thenBuilder=*/
2326      [&](mlir::OpBuilder &, mlir::Location) {
2327        LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
2328        resThen = emitStmt(thenS, /*useCurrentScope=*/true);
2329      },
2330      thenLoc,
2331      /*elseBuilder=*/
2332      [&](mlir::OpBuilder &, mlir::Location) {
2333        assert(elseLoc && "Invalid location for elseS.");
2334        LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
2335        resElse = emitStmt(elseS, /*useCurrentScope=*/true);
2336      },
2337      elseLoc);
2338
2339  return mlir::LogicalResult::success(resThen.succeeded() &&
2340                                      resElse.succeeded());
2341}
2342
2343/// Emit an `if` on a boolean condition, filling `then` and `else` into
2344/// appropriated regions.
// Lower-level overload: builds the cir.if itself from builder callbacks and
// returns the op, terminating each region it created.
2346    const clang::Expr *cond, BuilderCallbackRef thenBuilder,
2347    mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
2348    std::optional<mlir::Location> elseLoc) {
2349  // Attempt to be as accurate as possible with IfOp location, generate
2350  // one fused location that has either 2 or 4 total locations, depending
2351  // on else's availability.
2352  SmallVector<mlir::Location, 2> ifLocs{thenLoc};
2353  if (elseLoc)
2354    ifLocs.push_back(*elseLoc);
2355  mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);
2356
2357  // Emit the code with the fully general case.
2358  mlir::Value condV = emitOpOnBoolExpr(loc, cond);
2359  cir::IfOp ifOp = cir::IfOp::create(builder, loc, condV, elseLoc.has_value(),
2360                                     /*thenBuilder=*/thenBuilder,
2361                                     /*elseBuilder=*/elseBuilder);
2362  terminateStructuredRegionBody(ifOp.getThenRegion(), thenLoc);
2363  assert((elseLoc.has_value() || ifOp.getElseRegion().empty()) &&
2364         "else region created with no else location");
2365  if (elseLoc.has_value())
2366    terminateStructuredRegionBody(ifOp.getElseRegion(), *elseLoc);
2367  return ifOp;
2368}
2369
2370/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
// Evaluate \p cond as a boolean value, folding a top-level conditional
// operator into a cir.ternary whose result is converted to bool.
2371mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
2372                                             const Expr *cond) {
2375  cond = cond->IgnoreParens();
2376
2377  // In LLVM the condition is reversed here for efficient codegen.
2378  // This should be done in CIR prior to LLVM lowering, if we do now
2379  // we can make CIR based diagnostics misleading.
2380  // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
2382
2383  if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
2384    Expr *trueExpr = condOp->getTrueExpr();
2385    Expr *falseExpr = condOp->getFalseExpr();
2386    mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());
2387
2388    mlir::Value ternaryOpRes =
2389        cir::TernaryOp::create(
2390            builder, loc, condV, /*thenBuilder=*/
2391            [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
2392              mlir::Value lhs = emitScalarExpr(trueExpr);
2393              cir::YieldOp::create(b, loc, lhs);
2394            },
2395            /*elseBuilder=*/
2396            [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
2397              mlir::Value rhs = emitScalarExpr(falseExpr);
2398              cir::YieldOp::create(b, loc, rhs);
2399            })
2400            .getResult();
2401
    // The arms were emitted with their own scalar types; convert the merged
    // result to bool.
2402    return emitScalarConversion(ternaryOpRes, condOp->getType(),
2403                                getContext().BoolTy, condOp->getExprLoc());
2404  }
2405
2406  if (isa<CXXThrowExpr>(cond)) {
2407    cgm.errorNYI("NYI");
2408    return createDummyValue(loc, cond->getType());
2409  }
2410
2411  // If the branch has a condition wrapped by __builtin_unpredictable,
2412  // create metadata that specifies that the branch is unpredictable.
2413  // Don't bother if not optimizing because that metadata would not be used.
2415
2416  // Emit the code with the fully general case.
2417  return evaluateExprAsBool(cond);
2418}
2419
/// Create an alloca named \p name of type \p ty, choosing the entry block of
/// either the whole function or the current lexical scope, with a special
/// case to hoist allocas out of cir.try entry blocks.
2420mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2421                                       mlir::Location loc, CharUnits alignment,
2422                                       bool insertIntoFnEntryBlock,
2423                                       mlir::Value arraySize) {
2424  mlir::Block *entryBlock = insertIntoFnEntryBlock
2426                                : curLexScope->getEntryBlock();
2427
2428  // If this is an alloca in the entry basic block of a cir.try and there's
2429  // a surrounding cir.scope, make sure the alloca ends up in the surrounding
2430  // scope instead. This is necessary in order to guarantee all SSA values are
2431  // reachable during cleanups.
2432  if (auto tryOp =
2433          llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
2434    if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
2435      entryBlock = &scopeOp.getScopeRegion().front();
2436  }
2437
2438  return emitAlloca(name, ty, loc, alignment,
2439                    builder.getBestAllocaInsertPoint(entryBlock), arraySize);
2440}
2441
/// Create an alloca at the explicit insertion point \p ip. The builder's
/// current position is preserved via an InsertionGuard.
2442mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2443                                       mlir::Location loc, CharUnits alignment,
2444                                       mlir::OpBuilder::InsertPoint ip,
2445                                       mlir::Value arraySize) {
2446  // CIR uses its own alloca address space rather than follow the target data
2447  // layout like original CodeGen. The data layout awareness should be done in
2448  // the lowering pass instead.
2449  cir::PointerType localVarPtrTy =
2451  mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);
2452
2453  mlir::Value addr;
2454  {
2455    mlir::OpBuilder::InsertionGuard guard(builder);
2456    builder.restoreInsertionPoint(ip);
2457    addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
2458                                /*var type*/ ty, name, alignIntAttr, arraySize);
2460  }
2461  return addr;
2462}
2463
2464// Note: this function also emits constructor calls to support an MSVC
2465// extension allowing explicit constructor function call.
2468  const Expr *callee = ce->getCallee()->IgnoreParens();
2469
2470  if (isa<BinaryOperator>(callee))
2472
2473  const auto *me = cast<MemberExpr>(callee);
2474  const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());
2475
2476  if (md->isStatic()) {
2477    cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
2478    return RValue::get(nullptr);
2479  }
2480
// Gather the pieces of the member access (qualifier, arrow vs. dot, base
// object) and hand off to the common member-or-operator call path.
2481  bool hasQualifier = me->hasQualifier();
2482  NestedNameSpecifier qualifier = me->getQualifier();
2483  bool isArrow = me->isArrow();
2484  const Expr *base = me->getBase();
2485
2487      ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
2488}
2489
// NOTE(review): the signature of this function is not visible in this view;
// it evaluates \p e as a simple l-value and returns its address as a scalar
// r-value — confirm the declared name/contract against the header.
2491  // Emit the expression as an lvalue.
2492  LValue lv = emitLValue(e);
2493  assert(lv.isSimple());
2494  mlir::Value value = lv.getPointer();
2495
2497
2498  return RValue::get(value);
2499}
2500
// Load the pointee address out of a reference l-value. Volatile references
// are not handled yet. The pointee's natural alignment is computed and,
// optionally, its base info is reported through \p pointeeBaseInfo.
2502                                          LValueBaseInfo *pointeeBaseInfo) {
2503  if (refLVal.isVolatile())
2504    cgm.errorNYI(loc, "load of volatile reference");
2505
2506  cir::LoadOp load =
2507      cir::LoadOp::create(builder, loc, refLVal.getAddress().getElementType(),
2508                          refLVal.getAddress().getPointer());
2509
2511
2512  QualType pointeeType = refLVal.getType()->getPointeeType();
2513  CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
2514  return Address(load, convertTypeForMem(pointeeType), align);
2515}
2516
// Convenience wrapper: build a reference l-value for \p refAddr, load the
// pointee through it, and wrap the pointee as an l-value.
2518                                                  mlir::Location loc,
2519                                                  QualType refTy,
2520                                                  AlignmentSource source) {
2521  LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
2522  LValueBaseInfo pointeeBaseInfo;
2524  Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
2525  return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
2526                        pointeeBaseInfo);
2527}
2528
2529void CIRGenFunction::emitTrap(mlir::Location loc, bool createNewBlock) {
2530 cir::TrapOp::create(builder, loc);
2531 if (createNewBlock)
2532 builder.createBlock(builder.getBlock()->getParent());
2533}
2534
// Emit cir.unreachable at \p loc; optionally start a fresh block so that
// subsequent emission has a valid insertion point (mirrors emitTrap above).
2536                                     bool createNewBlock) {
2538  cir::UnreachableOp::create(builder, getLoc(loc));
2539  if (createNewBlock)
2540    builder.createBlock(builder.getBlock()->getParent());
2541}
2542
2543mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
2544 clang::QualType qt) {
2545 mlir::Type t = convertType(qt);
2546 CharUnits alignment = getContext().getTypeAlignInChars(qt);
2547 return builder.createDummyValue(loc, t, alignment);
2548}
2549
2550//===----------------------------------------------------------------------===//
2551// CIR builder helpers
2552//===----------------------------------------------------------------------===//
2553
// Create a temporary memory slot for a value of AST type \p ty, using the
// type's natural alignment.
2555                                     const Twine &name, Address *alloca,
2556                                     mlir::OpBuilder::InsertPoint ip) {
2557  // FIXME: Should we prefer the preferred type alignment here?
2558  return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
2559                       alloca, ip);
2560}
2561
// Overload taking an explicit alignment; constant matrix temporaries are
// still NYI.
2562                                     mlir::Location loc, const Twine &name,
2563                                     Address *alloca,
2564                                     mlir::OpBuilder::InsertPoint ip) {
2565  Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
2566                                    /*ArraySize=*/nullptr, alloca, ip);
2567  if (ty->isConstantMatrixType()) {
2569    cgm.errorNYI(loc, "temporary matrix value");
2570  }
2571  return result;
2572}
2574
2575/// This creates an alloca and inserts it into the entry block of the
2576/// current region.
// If \p ip is set, the alloca is placed there; otherwise it goes to the
// default entry-block position. The requested alignment is attached to the op.
2578    mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
2579    mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
2580  cir::AllocaOp alloca = ip.isSet()
2581                             ? createTempAlloca(ty, loc, name, ip, arraySize)
2582                             : createTempAlloca(ty, loc, name, arraySize);
2583  alloca.setAlignmentAttr(cgm.getSize(align));
2584  return Address(alloca, ty, align);
2585}
2586
2587/// This creates an alloca and inserts it into the entry block. The alloca is
2588/// casted to default address space if necessary.
2589// TODO(cir): Implement address space casting to match classic codegen's
2590// CreateTempAlloca behavior with DestLangAS parameter
2592                                     mlir::Location loc, const Twine &name,
2593                                     mlir::Value arraySize,
2594                                     Address *allocaAddr,
2595                                     mlir::OpBuilder::InsertPoint ip) {
2596  Address alloca =
2597      createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
// Report the uncast alloca address to the caller if requested.
2598  if (allocaAddr)
2599    *allocaAddr = alloca;
2600  mlir::Value v = alloca.getPointer();
2601  // Alloca always returns a pointer in alloca address space, which may
2602  // be different from the type defined by the language. For example,
2603  // in C++ the auto variables are in the default address space. Therefore
2604  // cast alloca to the default address space when necessary.
2605
2606  cir::PointerType dstTy;
2608    dstTy = builder.getPointerTo(ty, getCIRAllocaAddressSpace());
2609  else
2610    dstTy = builder.getPointerTo(ty, clang::LangAS::Default);
2611  v = performAddrSpaceCast(v, dstTy);
2612
2613  return Address(v, ty, align);
2614}
2615
2616/// This creates an alloca and inserts it into the entry block if \p ArraySize
2617/// is nullptr, otherwise inserts it at the current insertion point of the
2618/// builder.
2619cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2620 mlir::Location loc,
2621 const Twine &name,
2622 mlir::Value arraySize,
2623 bool insertIntoFnEntryBlock) {
2624 return mlir::cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
2625 insertIntoFnEntryBlock, arraySize)
2626 .getDefiningOp());
2627}
2628
2629/// This creates an alloca and inserts it into the provided insertion point
2630cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2631 mlir::Location loc,
2632 const Twine &name,
2633 mlir::OpBuilder::InsertPoint ip,
2634 mlir::Value arraySize) {
2635 assert(ip.isSet() && "Insertion point is not set");
2636 return mlir::cast<cir::AllocaOp>(
2637 emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
2638 .getDefiningOp());
2639}
2640
2641/// CreateDefaultAlignTempAlloca - This creates an alloca with the
2642/// default alignment of the corresponding LLVM type, which is *not*
2643/// guaranteed to be related in any way to the expected alignment of
2644/// an AST type that might have been lowered to Ty.
// The alignment is taken from the module data layout's ABI alignment of the
// CIR type, not from the AST.
2646                                                 mlir::Location loc,
2647                                                 const Twine &name) {
2648  CharUnits align =
2649      CharUnits::fromQuantity(cgm.getDataLayout().getABITypeAlign(ty));
2650  return createTempAlloca(ty, align, loc, name);
2651}
2652
2653/// Try to emit a reference to the given value without producing it as
2654/// an l-value. For many cases, this is just an optimization, but it avoids
2655/// us needing to emit global copies of variables if they're named without
2656/// triggering a formal use in a context where we can't emit a direct
2657/// reference to them, for instance if a block or lambda or a member of a
2658/// local class uses a const int variable or constexpr variable from an
2659/// enclosing function.
2660///
2661/// For named members of enums, this is the only way they are emitted.
2664  const ValueDecl *value = refExpr->getDecl();
2665
2666  // There is a lot more to do here, but for now only EnumConstantDecl is
2667  // supported.
2669
2670  // The value needs to be an enum constant or a constant variable.
2671  if (!isa<EnumConstantDecl>(value))
2672    return ConstantEmission();
2673
2674  Expr::EvalResult result;
2675  if (!refExpr->EvaluateAsRValue(result, getContext()))
2676    return ConstantEmission();
2677
2678  QualType resultType = refExpr->getType();
2679
2680  // As long as we're only handling EnumConstantDecl, there should be no
2681  // side-effects.
2682  assert(!result.HasSideEffects);
2683
2684  // Emit as a constant.
2685  // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
2686  // somewhat heavy refactoring...)
2687  mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
2688      refExpr->getLocation(), result.Val, resultType);
2689  mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
2690  assert(cstToEmit && "expected a typed attribute");
2691
2693
// The emission is returned by value (not by reference), which is correct for
// enum constants.
2694  return ConstantEmission::forValue(cstToEmit);
}
2696
// NOTE(review): the preceding lines of this overload are elided in this
// view; it forwards to the DeclRefExpr form of tryEmitAsConstant when the
// operand is (or resolves to) a DeclRefExpr, and otherwise reports failure.
2700    return tryEmitAsConstant(dre);
2701  return ConstantEmission();
2702}
2703
// Turn a successful ConstantEmission into a scalar cir constant value.
// Reference-form emissions are not supported yet.
2705    const CIRGenFunction::ConstantEmission &constant, Expr *e) {
2706  assert(constant && "not a constant");
2707  if (constant.isReference()) {
2708    cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
2709    return {};
2710  }
2711  return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
2712}
2713
// Emit an l-value for a PredefinedExpr (__func__ and friends): the string
// literal is emitted into a global whose name is derived from the current
// function's (unmangled) name. Predefined l-values inside blocks are NYI.
2714  const StringLiteral *sl = e->getFunctionName();
2715  assert(sl != nullptr && "No StringLiteral name in PredefinedExpr");
2716  auto fn = cast<cir::FuncOp>(curFn);
2717  StringRef fnName = fn.getName();
// Strip the '\01' literal-name escape prefix if present.
2718  fnName.consume_front("\01");
2719  std::array<StringRef, 2> nameItems = {
2721  std::string gvName = llvm::join(nameItems, ".");
2722  if (isa_and_nonnull<BlockDecl>(curCodeDecl))
2723    cgm.errorNYI(e->getSourceRange(), "predefined lvalue in block");
2724
2725  return emitStringLiteralLValue(sl, gvName);
2726}
2728
2733
2734namespace {
2735// Handle the case where the condition is a constant evaluatable simple integer,
2736// which means we don't have to separately handle the true/false blocks.
// Returns std::nullopt when the condition doesn't constant-fold, or when the
// dead arm contains a label (and so must still be emitted).
2737std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
2739    const Expr *condExpr = e->getCond();
2740  llvm::APSInt condExprVal;
2741  if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
2742    return std::nullopt;
2743
2744  const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
2745  if (!condExprVal.getBoolValue())
2746    std::swap(live, dead);
2747
2748  if (cgf.containsLabel(dead))
2749    return std::nullopt;
2750
2751  // If the true case is live, we need to track its region.
2754  // If a throw expression we emit it and return an undefined lvalue
2755  // because it can't be used.
2756  if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
2757    cgf.emitCXXThrowExpr(throwExpr);
2758    // Return an undefined lvalue - the throw terminates execution
2759    // so this value will never actually be used
2760    mlir::Type elemTy = cgf.convertType(dead->getType());
2761    mlir::Value undefPtr =
2762        cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
2763                                    cgf.getLoc(throwExpr->getSourceRange()));
2764    return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
2765                              dead->getType());
2766  }
2767  return cgf.emitLValue(live);
2768}
2769
2770/// Emit the operand of a glvalue conditional operator. This is either a glvalue
2771/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
2772/// LValue is returned and the current block has been terminated.
2773static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
2774 const Expr *operand) {
2775 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
2776 cgf.emitCXXThrowExpr(throwExpr);
2777 return std::nullopt;
2778 }
2779
2780 return cgf.emitLValue(operand);
2781}
2782} // namespace
2783
2784// Create and generate the 3 blocks for a conditional operator.
2785// Leaves the 'current block' in the continuation basic block.
// Emits cond, then both arms via \p branchGenFunc inside a cir.ternary.
// Arms that produce no pointer (void arms, or arms ending in a throw's
// cir.unreachable) are patched afterwards with matching yields.
2786template <typename FuncTy>
2789                                  const FuncTy &branchGenFunc) {
2790  ConditionalInfo info;
2791  ConditionalEvaluation eval(*this);
2792  mlir::Location loc = getLoc(e->getSourceRange());
2793  CIRGenBuilderTy &builder = getBuilder();
2794
2795  mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
2797  mlir::Type yieldTy{};
2798
2799  auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
2800                        const Expr *expr, std::optional<LValue> &resultLV) {
2801    CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
2802    curLexScope->setAsTernary();
2803
2805    eval.beginEvaluation();
2806    resultLV = branchGenFunc(*this, expr);
2807    mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
2808    eval.endEvaluation();
2809
2810    if (resultPtr) {
2811      yieldTy = resultPtr.getType();
2812      cir::YieldOp::create(b, loc, resultPtr);
2813    } else {
2814      // If LHS or RHS is a void expression we need
2815      // to patch arms as to properly match yield types.
2816      // If the current block's terminator is an UnreachableOp (from a throw),
2817      // we don't need a yield
2818      if (builder.getInsertionBlock()->mightHaveTerminator()) {
2819        mlir::Operation *terminator =
2820            builder.getInsertionBlock()->getTerminator();
2821        if (isa_and_nonnull<cir::UnreachableOp>(terminator))
2822          insertPoints.push_back(b.saveInsertionPoint());
2823      }
2824    }
2825  };
2826
2827  info.result = cir::TernaryOp::create(
2828                    builder, loc, condV,
2829                    /*trueBuilder=*/
2830                    [&](mlir::OpBuilder &b, mlir::Location loc) {
2831                      emitBranch(b, loc, e->getTrueExpr(), info.lhs);
2832                    },
2833                    /*falseBuilder=*/
2834                    [&](mlir::OpBuilder &b, mlir::Location loc) {
2835                      emitBranch(b, loc, e->getFalseExpr(), info.rhs);
2836                    })
2837                    .getResult();
2838
2839  // If both arms are void, so be it.
2840  if (!yieldTy)
2841    yieldTy = voidTy;
2842
2843  // Insert required yields.
2844  for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2845    mlir::OpBuilder::InsertionGuard guard(builder);
2846    builder.restoreInsertionPoint(toInsert);
2847
2848    // Block does not return: build empty yield.
    // NOTE(review): yieldTy was defaulted to voidTy just above, so this
    // branch appears unreachable — confirm whether the empty-yield path is
    // still intended.
2849    if (!yieldTy) {
2850      cir::YieldOp::create(builder, loc);
2851    } else { // Block returns: set null yield value.
2852      mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2853      cir::YieldOp::create(builder, loc, op0);
2854    }
2855  }
2856
2857  return info;
2858}
2859
// Emit a conditional operator in l-value position. Non-glvalue conditionals
// must be aggregates; constant-foldable conditions take the simple path;
// otherwise both arms are emitted as blocks and their addresses merged.
2862  if (!expr->isGLValue()) {
2863    // ?: here should be an aggregate.
2864    assert(hasAggregateEvaluationKind(expr->getType()) &&
2865           "Unexpected conditional operator!");
2866    return emitAggExprToLValue(expr);
2867  }
2868
2869  OpaqueValueMapping binding(*this, expr);
2870  if (std::optional<LValue> res =
2871          handleConditionalOperatorLValueSimpleCase(*this, expr))
2872    return *res;
2873
2874  ConditionalInfo info =
2875      emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
2876        return emitLValueOrThrowExpression(cgf, e);
2877      });
2878
2879  if ((info.lhs && !info.lhs->isSimple()) ||
2880      (info.rhs && !info.rhs->isSimple())) {
2881    cgm.errorNYI(expr->getSourceRange(),
2882                 "unsupported conditional operator with non-simple lvalue");
2883    return LValue();
2884  }
2885
2886  if (info.lhs && info.rhs) {
    // Both arms produced addresses: take the conservative (smaller)
    // alignment and the stronger of the two alignment sources.
2887    Address lhsAddr = info.lhs->getAddress();
2888    Address rhsAddr = info.rhs->getAddress();
2889    Address result(info.result, lhsAddr.getElementType(),
2890                   std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
2891    AlignmentSource alignSource =
2892        std::max(info.lhs->getBaseInfo().getAlignmentSource(),
2893                 info.rhs->getBaseInfo().getAlignmentSource());
2895    return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
2896  }
2897
// One arm was a throw-expression; the other supplies the result.
2898  assert((info.lhs || info.rhs) &&
2899         "both operands of glvalue conditional are throw-expressions?");
2900  return info.lhs ? *info.lhs : *info.rhs;
2901}
2902
2903/// An LValue is a candidate for having its loads and stores be made atomic if
2904/// we are operating under /volatile:ms *and* the LValue itself is volatile and
2905/// performing such an operation can be performed without a libcall.
// Currently always false: the /volatile:ms path is NYI.
2907  if (!cgm.getLangOpts().MSVolatile)
2908    return false;
2909
2910  cgm.errorNYI("LValueSuitableForInlineAtomic LangOpts MSVolatile");
2911  return false;
2912}
2913
#define V(N, I)
Provides definitions for the various language-specific address spaces.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e, GlobalDecl gd)
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static cir::IntAttr getConstantIndexOrNull(mlir::Value idx)
static const Expr * getSimpleArrayDecayOperand(const Expr *e)
If the specified expr is a simple decay from an array to pointer, return the array subexpression.
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd)
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e, const VarDecl *vd)
static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf, mlir::Location beginLoc, mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd, mlir::Value thisValue)
static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd)
static Address emitAddrOfZeroSizeField(CIRGenFunction &cgf, Address base, const FieldDecl *field)
Get the address of a zero-sized field within a record.
Defines the clang::Expr interface and subclasses for C++ expressions.
__device__ __2f16 b
__device__ __2f16 float c
cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr)
cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, mlir::Value stride)
static OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block)
cir::GetMemberOp createGetMember(mlir::Location loc, mlir::Type resultTy, mlir::Value base, llvm::StringRef name, unsigned index)
cir::PointerType getPointerTo(mlir::Type ty)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createPtrBitcast(mlir::Value src, mlir::Type newPointeeTy)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
mlir::Value createGetGlobal(mlir::Location loc, cir::GlobalOp global, bool threadLocal=false)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType BoolTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2779
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2753
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:2767
SourceLocation getEndLoc() const
Definition Expr.h:2770
QualType getElementType() const
Definition TypeBase.h:3784
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
Expr * getRHS() const
Definition Expr.h:4093
Opcode getOpcode() const
Definition Expr.h:4086
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
static Address invalid()
Definition Address.h:74
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:136
mlir::Type getType() const
Definition Address.h:115
bool isValid() const
Definition Address.h:75
mlir::Operation * getDefiningOp() const
Get the operation which defines this address.
Definition Address.h:139
An aggregate value slot.
IsDestructed_t
This is set to true if the slot might be aliased and it's not undefined behavior to access it through...
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
cir::ConstantOp getUInt64(uint64_t c, mlir::Location loc)
Address createElementBitCast(mlir::Location loc, Address addr, mlir::Type destType)
Cast the element type of the given address to a different type, preserving information like the align...
mlir::Value getArrayElement(mlir::Location arrayLocBegin, mlir::Location arrayLocEnd, mlir::Value arrayPtr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
Create a cir.ptr_stride operation to get access to an array element.
Abstract information about a function or function prototype.
Definition CIRGenCall.h:27
bool isPseudoDestructor() const
Definition CIRGenCall.h:123
void setFunctionPointer(mlir::Operation *functionPtr)
Definition CIRGenCall.h:185
const clang::FunctionDecl * getBuiltinDecl() const
Definition CIRGenCall.h:99
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CIRGenCall.h:127
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
unsigned getBuiltinID() const
Definition CIRGenCall.h:103
static CIRGenCallee forBuiltin(unsigned builtinID, const clang::FunctionDecl *builtinDecl)
Definition CIRGenCall.h:108
mlir::Operation * getFunctionPointer() const
Definition CIRGenCall.h:147
static CIRGenCallee forPseudoDestructor(const clang::CXXPseudoDestructorExpr *expr)
Definition CIRGenCall.h:117
An object to manage conditionally-evaluated expressions.
static ConstantEmission forValue(mlir::TypedAttr c)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitMemberExpr(const MemberExpr *e)
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
LValue emitLValueForLambdaField(const FieldDecl *field)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
mlir::Operation * curFn
The current function or global initializer that is generated code for.
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::Type convertTypeForMem(QualType t)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
static bool hasAggregateEvaluationKind(clang::QualType type)
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
LValue emitCallExprLValue(const clang::CallExpr *e)
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
mlir::MLIRContext & getMLIRContext()
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
void emitCXXThrowExpr(const CXXThrowExpr *e)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
void terminateStructuredRegionBody(mlir::Region &r, mlir::Location loc)
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates a alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
CIRGenBuilderTy & getBuilder()
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
mlir::Operation * getAddrOfGlobalTemporary(const MaterializeTemporaryExpr *mte, const Expr *init)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
mlir::Value getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty={}, ForDefinition_t isForDefinition=NotForDefinition)
Return the mlir::Value for the address of the given global variable.
cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant)
This class handles record and union layout info while lowering AST types to CIR types.
cir::RecordType getCIRType() const
Return the "complete object" LLVM type associated with this record.
const CIRGenBitFieldInfo & getBitFieldInfo(const clang::FieldDecl *fd) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getCIRFieldNo(const clang::FieldDecl *fd) const
Return cir::RecordType element number that corresponds to the field FD.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
mlir::Type convertTypeForMem(clang::QualType, bool forBitField=false)
Convert type T into an mlir::Type.
mlir::Attribute emitAbstract(const Expr *e, QualType destType)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
AlignmentSource getAlignmentSource() const
void mergeForCast(const LValueBaseInfo &info)
bool isExtVectorElt() const
mlir::Value getVectorPointer() const
const clang::Qualifiers & getQuals() const
mlir::Value getExtVectorPointer() const
static LValue makeExtVectorElt(Address vecAddress, mlir::ArrayAttr elts, clang::QualType type, LValueBaseInfo baseInfo)
mlir::Value getVectorIdx() const
bool isVectorElt() const
Address getAddress() const
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
mlir::ArrayAttr getExtVectorElts() const
static LValue makeVectorElt(Address vecAddress, mlir::Value index, clang::QualType t, LValueBaseInfo baseInfo)
RValue asAggregateRValue() const
unsigned getVRQualifiers() const
clang::QualType getType() const
static LValue makeBitfield(Address addr, const CIRGenBitFieldInfo &info, clang::QualType type, LValueBaseInfo baseInfo)
Create a new object to represent a bit-field access.
mlir::Value getPointer() const
bool isVolatileQualified() const
bool isBitField() const
Address getVectorAddress() const
clang::CharUnits getAlignment() const
LValueBaseInfo getBaseInfo() const
bool isVolatile() const
const CIRGenBitFieldInfo & getBitFieldInfo() const
Address getBitFieldAddress() const
Address getExtVectorAddress() const
bool isSimple() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Address getAggregateAddress() const
Return the value of the address of the aggregate.
Definition CIRGenValue.h:69
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a C++ destructor within a class.
Definition DeclCXX.h:2889
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:180
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2136
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1372
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:849
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
arg_range arguments()
Definition Expr.h:3198
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3766
bool changesVolatileQualification() const
Return.
Definition Expr.h:3813
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1951
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3325
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
bool isFileScope() const
Definition Expr.h:3640
const Expr * getInitializer() const
Definition Expr.h:3636
ConditionalOperator - The ?
Definition Expr.h:4394
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1477
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:488
ValueDecl * getDecl()
Definition Expr.h:1341
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1471
SourceLocation getLocation() const
Definition Expr.h:1349
SourceLocation getLocation() const
Definition DeclBase.h:447
DeclContext * getDeclContext()
Definition DeclBase.h:456
bool hasAttr() const
Definition DeclBase.h:585
const Expr * getBase() const
Definition Expr.h:6580
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:84
bool isGLValue() const
Definition Expr.h:287
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:447
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1546
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6609
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4436
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4549
Represents a member of a struct/union/class.
Definition Decl.h:3175
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3278
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4828
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3260
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3411
bool isPotentiallyOverlapping() const
Determine if this field is of potentially-overlapping class type, that is, subobject with the [[no_un...
Definition Decl.cpp:4806
Represents a function declaration or definition.
Definition Decl.h:2015
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5357
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4917
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4942
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4934
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4967
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3450
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3591
Expr * getBase() const
Definition Expr.h:3444
bool isArrow() const
Definition Expr.h:3551
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3562
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3703
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
Represents a C++ nested name specifier, such as "::std::vector<int>::".
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
bool isUnique() const
Definition Expr.h:1239
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2008
StringRef getIdentKindName() const
Definition Expr.h:2065
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2043
StringLiteral * getFunctionName()
Definition Expr.h:2052
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8557
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8471
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1444
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1185
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1551
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
Represents a struct/union/class.
Definition Decl.h:4342
Encodes a location in the source.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
bool isUnion() const
Definition Decl.h:3943
Exposes information about the current target.
Definition TargetInfo.h:227
virtual StringRef getABI() const
Get the ABI currently in use.
bool isVoidType() const
Definition TypeBase.h:9034
bool isBooleanType() const
Definition TypeBase.h:9171
bool isPackedVectorBoolType(const ASTContext &ctx) const
Definition Type.cpp:420
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9337
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8767
bool isFunctionPointerType() const
Definition TypeBase.h:8735
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2375
bool isConstantMatrixType() const
Definition TypeBase.h:8835
bool isPointerType() const
Definition TypeBase.h:8668
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9328
bool isReferenceType() const
Definition TypeBase.h:8692
bool isVariableArrayType() const
Definition TypeBase.h:8779
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
bool isExtVectorBoolType() const
Definition TypeBase.h:8815
bool isAnyComplexType() const
Definition TypeBase.h:8803
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9214
bool isAtomicType() const
Definition TypeBase.h:8860
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2850
bool isFunctionType() const
Definition TypeBase.h:8664
bool isVectorType() const
Definition TypeBase.h:8807
bool isSubscriptableVectorType() const
Definition TypeBase.h:8827
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9261
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2397
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
SourceLocation getExprLoc() const
Definition Expr.h:2371
Expr * getSubExpr() const
Definition Expr.h:2288
Opcode getOpcode() const
Definition Expr.h:2283
static bool isPrefix(Opcode Op)
isPrefix - Return true if this is a prefix operation, like --x.
Definition Expr.h:2322
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
TLSKind getTLSKind() const
Definition Decl.cpp:2181
bool hasInit() const
Definition Decl.cpp:2411
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2379
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1184
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:952
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4016
Represents a GCC generic vector type.
Definition TypeBase.h:4225
Defines the clang::TargetInfo interface.
mlir::ptr::MemorySpaceAttrInterface toCIRAddressSpaceAttr(mlir::MLIRContext &ctx, clang::LangAS langAS)
Convert an AST LangAS to the appropriate CIR address space attribute interface.
OverflowBehavior
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
bool isEmptyFieldForLayout(const ASTContext &context, const FieldDecl *fd)
isEmptyFieldForLayout - Return true if the field is "empty", that is, either a zero-width bit-field o...
static AlignmentSource getFieldAlignmentSource(AlignmentSource source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, CUDAKernelCallExpr > cudaKernelCallExpr
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
bool isTargetAddressSpace(LangAS AS)
@ SC_Register
Definition Specifiers.h:258
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:343
@ SD_Static
Static storage duration.
Definition Specifiers.h:344
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:341
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:342
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:345
LangAS
Defines the address space values used by the address space qualifier of QualType.
U cast(CodeGen::Address addr)
Definition Address.h:327
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:178
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:181
static bool weakRefReference()
static bool objCLifetime()
static bool emitLifetimeMarkers()
static bool opLoadEmitScalarRangeCheck()
static bool addressSpace()
static bool opAllocaNonGC()
static bool opAllocaOpenMPThreadPrivate()
static bool preservedAccessIndexRegion()
static bool mergeAllConstants()
static bool opLoadStoreTbaa()
static bool opCallChain()
static bool opAllocaImpreciseLifetime()
static bool opAllocaStaticLocal()
static bool opAllocaTLS()
static bool emitCheckedInBoundsGEP()
static bool attributeNoBuiltin()
static bool setObjCGCLValueClass()
static bool cirgenABIInfo()
static bool opLoadStoreObjC()
static bool opCallArgEvaluationOrder()
static bool lambdaCaptures()
static bool insertBuiltinUnpredictable()
static bool opCallMustTail()
static bool shouldReverseUnaryCondOnBoolExpr()
static bool tryEmitAsConstant()
static bool addressIsKnownNonNull()
static bool astVarDeclInterface()
static bool cgCapturedStmtInfo()
static bool opAllocaEscapeByReference()
static bool opLoadStoreNontemporal()
static bool opCallFnInfoOpts()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Record with information about how a bitfield should be accessed.
unsigned volatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned volatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
mlir::ptr::MemorySpaceAttrInterface getCIRAllocaAddressSpace() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:615