CIRGenExpr.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "Address.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/CharUnits.h"
22#include "clang/AST/Decl.h"
23#include "clang/AST/Expr.h"
24#include "clang/AST/ExprCXX.h"
31#include <optional>
32
33using namespace clang;
34using namespace clang::CIRGen;
35using namespace cir;
36
37/// Get the address of a field within a record. The resulting address
38/// doesn't necessarily have the right type.
39 Address CIRGenFunction::emitAddrOfFieldStorage(Address base,
40                                                const FieldDecl *field,
41 llvm::StringRef fieldName,
42 unsigned fieldIndex) {
43 if (field->isZeroSize(getContext())) {
44 cgm.errorNYI(field->getSourceRange(),
45 "emitAddrOfFieldStorage: zero-sized field");
46 return Address::invalid();
47 }
48
49 mlir::Location loc = getLoc(field->getLocation());
50
51 mlir::Type fieldType = convertType(field->getType());
52 auto fieldPtr = cir::PointerType::get(fieldType);
53   // In most cases fieldName is the same as field->getName(), but lambdas do
54   // not currently carry the name, so it can be passed down from the
55   // CaptureStmt.
56 cir::GetMemberOp memberAddr = builder.createGetMember(
57 loc, fieldPtr, base.getPointer(), fieldName, fieldIndex);
58
59 // Retrieve layout information, compute alignment and return the final
60 // address.
61 const RecordDecl *rec = field->getParent();
62 const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
63 unsigned idx = layout.getCIRFieldNo(field);
64   CharUnits offset = CharUnits::fromQuantity(
65       layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
66 return Address(memberAddr, base.getAlignment().alignmentAtOffset(offset));
67}
68
69/// Given an expression of pointer type, try to
70/// derive a more accurate bound on the alignment of the pointer.
71 Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
72                                                  LValueBaseInfo *baseInfo) {
73 // We allow this with ObjC object pointers because of fragile ABIs.
74 assert(expr->getType()->isPointerType() ||
75 expr->getType()->isObjCObjectPointerType());
76 expr = expr->IgnoreParens();
77
78 // Casts:
79 if (auto const *ce = dyn_cast<CastExpr>(expr)) {
80 if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
81 cgm.emitExplicitCastExprType(ece);
82
83 switch (ce->getCastKind()) {
84 // Non-converting casts (but not C's implicit conversion from void*).
85 case CK_BitCast:
86 case CK_NoOp:
87 case CK_AddressSpaceConversion: {
88 if (const auto *ptrTy =
89 ce->getSubExpr()->getType()->getAs<PointerType>()) {
90 if (ptrTy->getPointeeType()->isVoidType())
91 break;
92
93 LValueBaseInfo innerBaseInfo;
95 Address addr =
96 emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
97 if (baseInfo)
98 *baseInfo = innerBaseInfo;
99
100 if (isa<ExplicitCastExpr>(ce)) {
101 LValueBaseInfo targetTypeBaseInfo;
102
103 const QualType pointeeType = expr->getType()->getPointeeType();
104 const CharUnits align =
105 cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);
106
107 // If the source l-value is opaque, honor the alignment of the
108 // casted-to type.
109 if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
110 if (baseInfo)
111 baseInfo->mergeForCast(targetTypeBaseInfo);
112 addr = Address(addr.getPointer(), addr.getElementType(), align);
113 }
114 }
115
117
118 const mlir::Type eltTy =
119 convertTypeForMem(expr->getType()->getPointeeType());
120 addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
121 addr, eltTy);
123
124 return addr;
125 }
126 break;
127 }
128
129 // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
130 case CK_ArrayToPointerDecay:
131 return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);
132
133 case CK_UncheckedDerivedToBase:
134 case CK_DerivedToBase: {
137 Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
138 const CXXRecordDecl *derived =
139 ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
140 return getAddressOfBaseClass(addr, derived, ce->path(),
141                                  /*NullCheckValue=*/false,
142                                  ce->getExprLoc());
143 }
144
145 case CK_AnyPointerToBlockPointerCast:
146 case CK_BaseToDerived:
147 case CK_BaseToDerivedMemberPointer:
148 case CK_BlockPointerToObjCPointerCast:
149 case CK_BuiltinFnToFnPtr:
150 case CK_CPointerToObjCPointerCast:
151 case CK_DerivedToBaseMemberPointer:
152 case CK_Dynamic:
153 case CK_FunctionToPointerDecay:
154 case CK_IntegralToPointer:
155 case CK_LValueToRValue:
156 case CK_LValueToRValueBitCast:
157 case CK_NullToMemberPointer:
158 case CK_NullToPointer:
159 case CK_ReinterpretMemberPointer:
160 // Common pointer conversions, nothing to do here.
161 // TODO: Is there any reason to treat base-to-derived conversions
162 // specially?
163 break;
164
165 case CK_ARCConsumeObject:
166 case CK_ARCExtendBlockObject:
167 case CK_ARCProduceObject:
168 case CK_ARCReclaimReturnedObject:
169 case CK_AtomicToNonAtomic:
170 case CK_BooleanToSignedIntegral:
171 case CK_ConstructorConversion:
172 case CK_CopyAndAutoreleaseBlockObject:
173 case CK_Dependent:
174 case CK_FixedPointCast:
175 case CK_FixedPointToBoolean:
176 case CK_FixedPointToFloating:
177 case CK_FixedPointToIntegral:
178 case CK_FloatingCast:
179 case CK_FloatingComplexCast:
180 case CK_FloatingComplexToBoolean:
181 case CK_FloatingComplexToIntegralComplex:
182 case CK_FloatingComplexToReal:
183 case CK_FloatingRealToComplex:
184 case CK_FloatingToBoolean:
185 case CK_FloatingToFixedPoint:
186 case CK_FloatingToIntegral:
187 case CK_HLSLAggregateSplatCast:
188 case CK_HLSLArrayRValue:
189 case CK_HLSLElementwiseCast:
190 case CK_HLSLVectorTruncation:
191 case CK_IntToOCLSampler:
192 case CK_IntegralCast:
193 case CK_IntegralComplexCast:
194 case CK_IntegralComplexToBoolean:
195 case CK_IntegralComplexToFloatingComplex:
196 case CK_IntegralComplexToReal:
197 case CK_IntegralRealToComplex:
198 case CK_IntegralToBoolean:
199 case CK_IntegralToFixedPoint:
200 case CK_IntegralToFloating:
201 case CK_LValueBitCast:
202 case CK_MatrixCast:
203 case CK_MemberPointerToBoolean:
204 case CK_NonAtomicToAtomic:
205 case CK_ObjCObjectLValueCast:
206 case CK_PointerToBoolean:
207 case CK_PointerToIntegral:
208 case CK_ToUnion:
209 case CK_ToVoid:
210 case CK_UserDefinedConversion:
211 case CK_VectorSplat:
212 case CK_ZeroToOCLOpaqueType:
213 llvm_unreachable("unexpected cast for emitPointerWithAlignment");
214 }
215 }
216
217 // Unary &
218 if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
219 // TODO(cir): maybe we should use cir.unary for pointers here instead.
220 if (uo->getOpcode() == UO_AddrOf) {
221 LValue lv = emitLValue(uo->getSubExpr());
222 if (baseInfo)
223 *baseInfo = lv.getBaseInfo();
225 return lv.getAddress();
226 }
227 }
228
229 // std::addressof and variants.
230 if (auto const *call = dyn_cast<CallExpr>(expr)) {
231 switch (call->getBuiltinCallee()) {
232 default:
233 break;
234 case Builtin::BIaddressof:
235 case Builtin::BI__addressof:
236 case Builtin::BI__builtin_addressof: {
237 cgm.errorNYI(expr->getSourceRange(),
238 "emitPointerWithAlignment: builtin addressof");
239 return Address::invalid();
240 }
241 }
242 }
243
244 // Otherwise, use the alignment of the type.
245   return makeNaturalAddressForPointer(
246       emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
247 /*forPointeeType=*/true, baseInfo);
248}
249
250 void CIRGenFunction::emitStoreThroughLValue(RValue src, LValue dst,
251                                             bool isInit) {
252 if (!dst.isSimple()) {
253 if (dst.isVectorElt()) {
254 // Read/modify/write the vector, inserting the new element
255 const mlir::Location loc = dst.getVectorPointer().getLoc();
256 const mlir::Value vector =
257 builder.createLoad(loc, dst.getVectorAddress());
258 const mlir::Value newVector = cir::VecInsertOp::create(
259 builder, loc, vector, src.getValue(), dst.getVectorIdx());
260 builder.createStore(loc, newVector, dst.getVectorAddress());
261 return;
262 }
263
264     assert(dst.isBitField() && "Unknown LValue type");
265     emitStoreThroughBitfieldLValue(src, dst);
266     return;
267
268 cgm.errorNYI(dst.getPointer().getLoc(),
269 "emitStoreThroughLValue: non-simple lvalue");
270 return;
271 }
272
274
275 assert(src.isScalar() && "Can't emit an aggregate store with this method");
276 emitStoreOfScalar(src.getValue(), dst, isInit);
277}
278
279static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
280 const VarDecl *vd) {
281 QualType t = e->getType();
282
283 // If it's thread_local, emit a call to its wrapper function instead.
285 if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
286 cgf.cgm.errorNYI(e->getSourceRange(),
287 "emitGlobalVarDeclLValue: thread_local variable");
288
289 // Check if the variable is marked as declare target with link clause in
290 // device codegen.
291 if (cgf.getLangOpts().OpenMP)
292 cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");
293
294   // Traditional LLVM codegen handles thread-local variables separately; CIR
295   // handles them as part of getAddrOfGlobalVar.
296 mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);
297
299 mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
300 cir::PointerType realPtrTy = cgf.getBuilder().getPointerTo(realVarTy);
301 if (realPtrTy != v.getType())
302 v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);
303
304 CharUnits alignment = cgf.getContext().getDeclAlign(vd);
305 Address addr(v, realVarTy, alignment);
306 LValue lv;
307 if (vd->getType()->isReferenceType())
308 cgf.cgm.errorNYI(e->getSourceRange(),
309 "emitGlobalVarDeclLValue: reference type");
310 else
311 lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
313 return lv;
314}
315
316void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
317 bool isVolatile, QualType ty,
318 LValueBaseInfo baseInfo, bool isInit,
319 bool isNontemporal) {
321
322 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
323 // Boolean vectors use `iN` as storage type.
324 if (clangVecTy->isExtVectorBoolType())
325 cgm.errorNYI(addr.getPointer().getLoc(),
326 "emitStoreOfScalar ExtVectorBoolType");
327
328 // Handle vectors of size 3 like size 4 for better performance.
329 const mlir::Type elementType = addr.getElementType();
330 const auto vecTy = cast<cir::VectorType>(elementType);
331
332     // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it is upstreamed
334 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
335 cgm.errorNYI(addr.getPointer().getLoc(),
336 "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
337 }
338
339 value = emitToMemory(value, ty);
340
342 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
343 if (ty->isAtomicType() ||
344 (!isInit && isLValueSuitableForInlineAtomic(atomicLValue))) {
345 emitAtomicStore(RValue::get(value), atomicLValue, isInit);
346 return;
347 }
348
349 // Update the alloca with more info on initialization.
350 assert(addr.getPointer() && "expected pointer to exist");
351 auto srcAlloca = addr.getDefiningOp<cir::AllocaOp>();
352 if (currVarDecl && srcAlloca) {
353 const VarDecl *vd = currVarDecl;
354 assert(vd && "VarDecl expected");
355 if (vd->hasInit())
356 srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
357 }
358
359 assert(currSrcLoc && "must pass in source location");
360 builder.createStore(*currSrcLoc, value, addr, isVolatile);
361
362 if (isNontemporal) {
363 cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
364 return;
365 }
366
368}
369
370// TODO: Replace this with a proper TargetInfo function call.
371/// Helper method to check if the underlying ABI is AAPCS
372static bool isAAPCS(const TargetInfo &targetInfo) {
373 return targetInfo.getABI().starts_with("aapcs");
374}
375
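/// Store the value in \p src into the bit-field l-value \p dst and return the
/// value produced by the underlying set-bitfield operation.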
376 mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src,
377                                                            LValue dst) {
378
379 const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
380 mlir::Type resLTy = convertTypeForMem(dst.getType());
381 Address ptr = dst.getBitFieldAddress();
382
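  // AAPCS requires volatile bit-fields to be accessed using the declared width
  // of their container, so only honor the volatile storage size when the
  // target ABI is AAPCS and the AAPCSBitfieldWidth codegen option is enabled.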
383   bool useVolatile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
384                      dst.isVolatileQualified() &&
385                      info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());
386
387 mlir::Value dstAddr = dst.getAddress().getPointer();
388
389   return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr,
390                                    ptr.getElementType(), src.getValue(), info,
391                                    dst.isVolatileQualified(), useVolatile);
392}
393
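/// Load the current value of the bit-field l-value \p lv and return it as an
/// rvalue of the bit-field's declared type.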
394 RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
395   const CIRGenBitFieldInfo &info = lv.getBitFieldInfo();
396
397 // Get the output type.
398 mlir::Type resLTy = convertType(lv.getType());
399 Address ptr = lv.getBitFieldAddress();
400
401   bool useVolatile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
402                      isAAPCS(cgm.getTarget());
403
404   mlir::Value field =
405       builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
406                                 info, lv.isVolatile(), useVolatile);
408 return RValue::get(field);
409}
410
411 Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
412                                                  const FieldDecl *field,
413 mlir::Type fieldType,
414 unsigned index) {
415 mlir::Location loc = getLoc(field->getLocation());
416 cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
417   auto rec = cast<cir::RecordType>(base.getAddress().getElementType());
418   cir::GetMemberOp sea = getBuilder().createGetMember(
419 loc, fieldPtr, base.getPointer(), field->getName(),
420 rec.isUnion() ? field->getFieldIndex() : index);
421   CharUnits offset = CharUnits::fromQuantity(
422       rec.getElementOffset(cgm.getDataLayout().layout, index));
423 return Address(sea, base.getAlignment().alignmentAtOffset(offset));
424}
425
426 LValue CIRGenFunction::emitLValueForBitField(LValue base,
427                                              const FieldDecl *field) {
428 LValueBaseInfo baseInfo = base.getBaseInfo();
429 const CIRGenRecordLayout &layout =
430 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
431 const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
432
434
435 unsigned idx = layout.getCIRFieldNo(field);
436 Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);
437
438 mlir::Location loc = getLoc(field->getLocation());
439 if (addr.getElementType() != info.storageType)
440 addr = builder.createElementBitCast(loc, addr, info.storageType);
441
442   QualType fieldType =
443       field->getType().withCVRQualifiers(base.getVRQualifiers());
444 // TODO(cir): Support TBAA for bit fields.
446 LValueBaseInfo fieldBaseInfo(baseInfo.getAlignmentSource());
447 return LValue::makeBitfield(addr, info, fieldType, fieldBaseInfo);
448}
449
450 LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) {
451   LValueBaseInfo baseInfo = base.getBaseInfo();
452
453 if (field->isBitField())
454 return emitLValueForBitField(base, field);
455
456 QualType fieldType = field->getType();
457 const RecordDecl *rec = field->getParent();
458 AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
459 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));
461
462 Address addr = base.getAddress();
463 if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
464 if (cgm.getCodeGenOpts().StrictVTablePointers &&
465 classDecl->isDynamicClass()) {
466 cgm.errorNYI(field->getSourceRange(),
467 "emitLValueForField: strict vtable for dynamic class");
468 }
469 }
470
471 unsigned recordCVR = base.getVRQualifiers();
472
473 llvm::StringRef fieldName = field->getName();
474 unsigned fieldIndex;
475 if (cgm.lambdaFieldToName.count(field))
476 fieldName = cgm.lambdaFieldToName[field];
477
478 if (rec->isUnion())
479 fieldIndex = field->getFieldIndex();
480 else {
481 const CIRGenRecordLayout &layout =
482 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
483 fieldIndex = layout.getCIRFieldNo(field);
484 }
485
486 addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);
488
489 // If this is a reference field, load the reference right now.
490 if (fieldType->isReferenceType()) {
492 LValue refLVal = makeAddrLValue(addr, fieldType, fieldBaseInfo);
493 if (recordCVR & Qualifiers::Volatile)
494 refLVal.getQuals().addVolatile();
495 addr = emitLoadOfReference(refLVal, getLoc(field->getSourceRange()),
496 &fieldBaseInfo);
497
498 // Qualifiers on the struct don't apply to the referencee.
499 recordCVR = 0;
500 fieldType = fieldType->getPointeeType();
501 }
502
503 if (field->hasAttr<AnnotateAttr>()) {
504 cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
505 return LValue();
506 }
507
508 LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
509 lv.getQuals().addCVRQualifiers(recordCVR);
510
511 // __weak attribute on a field is ignored.
512   if (lv.getQuals().getObjCGCAttr() == Qualifiers::Weak) {
513     cgm.errorNYI(field->getSourceRange(),
514 "emitLValueForField: __weak attribute");
515 return LValue();
516 }
517
518 return lv;
519}
520
521 LValue CIRGenFunction::emitLValueForFieldInitialization(
522     LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName) {
523 QualType fieldType = field->getType();
524
525 if (!fieldType->isReferenceType())
526 return emitLValueForField(base, field);
527
528 const CIRGenRecordLayout &layout =
529 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
530 unsigned fieldIndex = layout.getCIRFieldNo(field);
531
532 Address v =
533 emitAddrOfFieldStorage(base.getAddress(), field, fieldName, fieldIndex);
534
535 // Make sure that the address is pointing to the right type.
536 mlir::Type memTy = convertTypeForMem(fieldType);
537 v = builder.createElementBitCast(getLoc(field->getSourceRange()), v, memTy);
538
539 // TODO: Generate TBAA information that describes this access as a structure
540 // member access and not just an access to an object of the field's type. This
541 // should be similar to what we do in EmitLValueForField().
542 LValueBaseInfo baseInfo = base.getBaseInfo();
543 AlignmentSource fieldAlignSource = baseInfo.getAlignmentSource();
544 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(fieldAlignSource));
546 return makeAddrLValue(v, fieldType, fieldBaseInfo);
547}
548
549mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
550 // Bool has a different representation in memory than in registers,
551 // but in ClangIR, it is simply represented as a cir.bool value.
552 // This function is here as a placeholder for possible future changes.
553 return value;
554}
555
556void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
557 bool isInit) {
558 if (lvalue.getType()->isConstantMatrixType()) {
559 assert(0 && "NYI: emitStoreOfScalar constant matrix type");
560 return;
561 }
562
563 emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
564 lvalue.getType(), lvalue.getBaseInfo(), isInit,
565 /*isNontemporal=*/false);
566}
567
568mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
569 QualType ty, SourceLocation loc,
570 LValueBaseInfo baseInfo) {
572 mlir::Type eltTy = addr.getElementType();
573
574 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
575 if (clangVecTy->isExtVectorBoolType()) {
576 cgm.errorNYI(loc, "emitLoadOfScalar: ExtVectorBoolType");
577 return nullptr;
578 }
579
580 const auto vecTy = cast<cir::VectorType>(eltTy);
581
582 // Handle vectors of size 3 like size 4 for better performance.
584 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
585 cgm.errorNYI(addr.getPointer().getLoc(),
586 "emitLoadOfScalar Vec3 & PreserveVec3Type disabled");
587 }
588
590 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
591 if (ty->isAtomicType() || isLValueSuitableForInlineAtomic(atomicLValue))
592 cgm.errorNYI("emitLoadOfScalar: load atomic");
593
594 if (mlir::isa<cir::VoidType>(eltTy))
595 cgm.errorNYI(loc, "emitLoadOfScalar: void type");
596
598
599 mlir::Value loadOp = builder.createLoad(getLoc(loc), addr, isVolatile);
600 if (!ty->isBooleanType() && ty->hasBooleanRepresentation())
601 cgm.errorNYI("emitLoadOfScalar: boolean type with boolean representation");
602
603 return loadOp;
604}
605
606 mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue,
607                                              SourceLocation loc) {
610 return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
611 lvalue.getType(), loc, lvalue.getBaseInfo());
612}
613
614/// Given an expression that represents a value lvalue, this
615/// method emits the address of the lvalue, then loads the result as an rvalue,
616/// returning the rvalue.
617 RValue CIRGenFunction::emitLoadOfLValue(LValue lv, SourceLocation loc) {
618   assert(!lv.getType()->isFunctionType());
619 assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");
620
621 if (lv.isBitField())
622 return emitLoadOfBitfieldLValue(lv, loc);
623
624 if (lv.isSimple())
625 return RValue::get(emitLoadOfScalar(lv, loc));
626
627 if (lv.isVectorElt()) {
628 const mlir::Value load =
629 builder.createLoad(getLoc(loc), lv.getVectorAddress());
630 return RValue::get(cir::VecExtractOp::create(builder, getLoc(loc), load,
631 lv.getVectorIdx()));
632 }
633
634   if (lv.isExtVectorElt())
635     return emitLoadOfExtVectorElementLValue(lv);
636
637 cgm.errorNYI(loc, "emitLoadOfLValue");
638 return RValue::get(nullptr);
639}
640
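/// Return the element index stored at position \p idx of the encoded
/// ext-vector element access list \p elts.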
641int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx,
642 const mlir::ArrayAttr elts) {
643 auto elt = mlir::cast<mlir::IntegerAttr>(elts[idx]);
644 return elt.getInt();
645}
646
647// If this is a reference to a subset of the elements of a vector, create an
648// appropriate shufflevector.
649 RValue CIRGenFunction::emitLoadOfExtVectorElementLValue(LValue lv) {
650   mlir::Location loc = lv.getExtVectorPointer().getLoc();
651 mlir::Value vec = builder.createLoad(loc, lv.getExtVectorAddress());
652
653 // HLSL allows treating scalars as one-element vectors. Converting the scalar
654 // IR value to a vector here allows the rest of codegen to behave as normal.
655 if (getLangOpts().HLSL && !mlir::isa<cir::VectorType>(vec.getType())) {
656 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: HLSL");
657 return {};
658 }
659
660 const mlir::ArrayAttr elts = lv.getExtVectorElts();
661
662 // If the result of the expression is a non-vector type, we must be extracting
663 // a single element. Just codegen as an extractelement.
664 const auto *exprVecTy = lv.getType()->getAs<clang::VectorType>();
665 if (!exprVecTy) {
666 int64_t indexValue = getAccessedFieldNo(0, elts);
667 cir::ConstantOp index =
668 builder.getConstInt(loc, builder.getSInt64Ty(), indexValue);
669 return RValue::get(cir::VecExtractOp::create(builder, loc, vec, index));
670 }
671
672 // Always use shuffle vector to try to retain the original program structure
673   SmallVector<int64_t, 4> mask;
674   for (auto i : llvm::seq<unsigned>(0, exprVecTy->getNumElements()))
675 mask.push_back(getAccessedFieldNo(i, elts));
676
677 cir::VecShuffleOp resultVec = builder.createVecShuffle(loc, vec, mask);
678 if (lv.getType()->isExtVectorBoolType()) {
679 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: ExtVectorBoolType");
680 return {};
681 }
682
683 return RValue::get(resultVec);
684}
685
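/// Get the cir.func op that should be used to reference the function \p gd.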
686static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
688 return cgm.getAddrOfFunction(gd);
689}
690
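/// Emit an l-value for the lambda capture field \p fd, using \p thisValue as
/// the address of the enclosing lambda object.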
691 static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd,
692                                       mlir::Value thisValue) {
693 return cgf.emitLValueForLambdaField(fd, thisValue);
694}
695
696/// Given that we are currently emitting a lambda, emit an l-value for
697/// one of its members.
698///
699 LValue CIRGenFunction::emitLValueForLambdaField(const FieldDecl *field,
700                                                 mlir::Value thisValue) {
701 bool hasExplicitObjectParameter = false;
702 const auto *methD = dyn_cast_if_present<CXXMethodDecl>(curCodeDecl);
703 LValue lambdaLV;
704 if (methD) {
705 hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction();
706 assert(methD->getParent()->isLambda());
707 assert(methD->getParent() == field->getParent());
708 }
709 if (hasExplicitObjectParameter) {
710 cgm.errorNYI(field->getSourceRange(), "ExplicitObjectMemberFunction");
711 } else {
712     QualType lambdaTagType =
713         getContext().getTagDeclType(field->getParent());
714 lambdaLV = makeNaturalAlignAddrLValue(thisValue, lambdaTagType);
715 }
716 return emitLValueForField(lambdaLV, field);
717}
718
719 LValue CIRGenFunction::emitLValueForLambdaField(const FieldDecl *field) {
720   return emitLValueForLambdaField(field, cxxabiThisValue);
721 }
722
723static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
724 GlobalDecl gd) {
725 const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
726 cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
727 mlir::Location loc = cgf.getLoc(e->getSourceRange());
728 CharUnits align = cgf.getContext().getDeclAlign(fd);
729
731
732 mlir::Type fnTy = funcOp.getFunctionType();
733 mlir::Type ptrTy = cir::PointerType::get(fnTy);
734 mlir::Value addr = cir::GetGlobalOp::create(cgf.getBuilder(), loc, ptrTy,
735 funcOp.getSymName());
736
737 if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
738 fnTy = cgf.convertType(fd->getType());
739 ptrTy = cir::PointerType::get(fnTy);
740
741 addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
742 cir::CastKind::bitcast, addr);
743 }
744
745 return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
746                             AlignmentSource::Decl);
747}
748
749/// Determine whether we can emit a reference to \p vd from the current
750/// context, despite not necessarily having seen an odr-use of the variable in
751/// this context.
752/// TODO(cir): This could be shared with classic codegen.
753 static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf,
754                                                const DeclRefExpr *e,
755 const VarDecl *vd) {
756 // For a variable declared in an enclosing scope, do not emit a spurious
757 // reference even if we have a capture, as that will emit an unwarranted
758 // reference to our capture state, and will likely generate worse code than
759 // emitting a local copy.
760   if (e->refersToEnclosingVariableOrCapture())
761     return false;
762
763 // For a local declaration declared in this function, we can always reference
764 // it even if we don't have an odr-use.
765 if (vd->hasLocalStorage()) {
766 return vd->getDeclContext() ==
767 dyn_cast_or_null<DeclContext>(cgf.curCodeDecl);
768 }
769
770 // For a global declaration, we can emit a reference to it if we know
771 // for sure that we are able to emit a definition of it.
772 vd = vd->getDefinition(cgf.getContext());
773 if (!vd)
774 return false;
775
776 // Don't emit a spurious reference if it might be to a variable that only
777 // exists on a different device / target.
778 // FIXME: This is unnecessarily broad. Check whether this would actually be a
779 // cross-target reference.
780 if (cgf.getLangOpts().OpenMP || cgf.getLangOpts().CUDA ||
781 cgf.getLangOpts().OpenCL) {
782 return false;
783 }
784
785 // We can emit a spurious reference only if the linkage implies that we'll
786 // be emitting a non-interposable symbol that will be retained until link
787 // time.
788 switch (cgf.cgm.getCIRLinkageVarDefinition(vd, /*IsConstant=*/false)) {
789 case cir::GlobalLinkageKind::ExternalLinkage:
790 case cir::GlobalLinkageKind::LinkOnceODRLinkage:
791 case cir::GlobalLinkageKind::WeakODRLinkage:
792 case cir::GlobalLinkageKind::InternalLinkage:
793 case cir::GlobalLinkageKind::PrivateLinkage:
794 return true;
795 default:
796 return false;
797 }
798}
799
800 LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
801   const NamedDecl *nd = e->getDecl();
802 QualType ty = e->getType();
803
804 assert(e->isNonOdrUse() != NOUR_Unevaluated &&
805 "should not emit an unevaluated operand");
806
807 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
808 // Global Named registers access via intrinsics only
809 if (vd->getStorageClass() == SC_Register && vd->hasAttr<AsmLabelAttr>() &&
810 !vd->isLocalVarDecl()) {
811 cgm.errorNYI(e->getSourceRange(),
812 "emitDeclRefLValue: Global Named registers access");
813 return LValue();
814 }
815
816 if (e->isNonOdrUse() == NOUR_Constant &&
817 (vd->getType()->isReferenceType() ||
818 !canEmitSpuriousReferenceToVariable(*this, e, vd))) {
819 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: NonOdrUse");
820 return LValue();
821 }
822
823 // Check for captured variables.
824     if (e->refersToEnclosingVariableOrCapture()) {
825       vd = vd->getCanonicalDecl();
826 if (FieldDecl *fd = lambdaCaptureFields.lookup(vd))
827 return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
830 }
831 }
832
833 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
834 // Checks for omitted feature handling
841
842 // Check if this is a global variable
843 if (vd->hasLinkage() || vd->isStaticDataMember())
844 return emitGlobalVarDeclLValue(*this, e, vd);
845
846 Address addr = Address::invalid();
847
848 // The variable should generally be present in the local decl map.
849 auto iter = localDeclMap.find(vd);
850 if (iter != localDeclMap.end()) {
851 addr = iter->second;
852 } else {
853 // Otherwise, it might be static local we haven't emitted yet for some
854 // reason; most likely, because it's in an outer function.
855 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: static local");
856 }
857
858 // Drill into reference types.
859 LValue lv =
860 vd->getType()->isReferenceType()
861             ? emitLoadOfReferenceLValue(addr, getLoc(e->getSourceRange()),
862                                         vd->getType(), AlignmentSource::Decl)
863             : makeAddrLValue(addr, ty, AlignmentSource::Decl);
864
865   // Statics are defined as globals, so they are not included in the function's
866   // symbol table.
867 assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
868 "non-static locals should be already mapped");
869
870 return lv;
871 }
872
873 if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
875     if (e->refersToEnclosingVariableOrCapture()) {
876       cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
877 return LValue();
878 }
879 return emitLValue(bd->getBinding());
880 }
881
882 if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
883 LValue lv = emitFunctionDeclLValue(*this, e, fd);
884
885 // Emit debuginfo for the function declaration if the target wants to.
886 if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
888
889 return lv;
890 }
891
892 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
893 return LValue();
894}
895
896 mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *e) {
897   QualType boolTy = getContext().BoolTy;
898 SourceLocation loc = e->getExprLoc();
899
901 if (e->getType()->getAs<MemberPointerType>()) {
902 cgm.errorNYI(e->getSourceRange(),
903 "evaluateExprAsBool: member pointer type");
904 return createDummyValue(getLoc(loc), boolTy);
905 }
906
908 if (!e->getType()->isAnyComplexType())
909 return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);
910
912 loc);
913}
914
915 LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) {
916   UnaryOperatorKind op = e->getOpcode();
917
918 // __extension__ doesn't affect lvalue-ness.
919 if (op == UO_Extension)
920 return emitLValue(e->getSubExpr());
921
922 switch (op) {
923 case UO_Deref: {
924     QualType t = e->getSubExpr()->getType()->getPointeeType();
925     assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
926
928 LValueBaseInfo baseInfo;
929 Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);
930
931 // Tag 'load' with deref attribute.
932     // FIXME: This misses some dereference cases and has problematic interactions
933 // with other operators.
934 if (auto loadOp = addr.getDefiningOp<cir::LoadOp>())
935 loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));
936
937 LValue lv = makeAddrLValue(addr, t, baseInfo);
940 return lv;
941 }
942 case UO_Real:
943 case UO_Imag: {
944 LValue lv = emitLValue(e->getSubExpr());
945 assert(lv.isSimple() && "real/imag on non-ordinary l-value");
946
947 // __real is valid on scalars. This is a faster way of testing that.
948 // __imag can only produce an rvalue on scalars.
949 if (e->getOpcode() == UO_Real &&
950 !mlir::isa<cir::ComplexType>(lv.getAddress().getElementType())) {
951 assert(e->getSubExpr()->getType()->isArithmeticType());
952 return lv;
953 }
954
955     QualType exprTy = getContext().getCanonicalType(e->getSubExpr()->getType());
956     QualType elemTy = exprTy->castAs<clang::ComplexType>()->getElementType();
957 mlir::Location loc = getLoc(e->getExprLoc());
958 Address component =
959 e->getOpcode() == UO_Real
960 ? builder.createComplexRealPtr(loc, lv.getAddress())
961 : builder.createComplexImagPtr(loc, lv.getAddress());
963 LValue elemLV = makeAddrLValue(component, elemTy);
964 elemLV.getQuals().addQualifiers(lv.getQuals());
965 return elemLV;
966 }
967 case UO_PreInc:
968 case UO_PreDec: {
969 cir::UnaryOpKind kind =
970 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
971 LValue lv = emitLValue(e->getSubExpr());
972
973 assert(e->isPrefix() && "Prefix operator in unexpected state!");
974
975 if (e->getType()->isAnyComplexType()) {
976 emitComplexPrePostIncDec(e, lv, kind, /*isPre=*/true);
977 } else {
978 emitScalarPrePostIncDec(e, lv, kind, /*isPre=*/true);
979 }
980
981 return lv;
982 }
983 case UO_Extension:
984 llvm_unreachable("UnaryOperator extension should be handled above!");
985 case UO_Plus:
986 case UO_Minus:
987 case UO_Not:
988 case UO_LNot:
989 case UO_AddrOf:
990 case UO_PostInc:
991 case UO_PostDec:
992 case UO_Coawait:
993 llvm_unreachable("UnaryOperator of non-lvalue kind!");
994 }
995 llvm_unreachable("Unknown unary operator kind!");
996}
997
998/// If the specified expr is a simple decay from an array to pointer,
999/// return the array subexpression.
1000/// FIXME: this could be abstracted into a common AST helper.
1001static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
1002 // If this isn't just an array->pointer decay, bail out.
1003 const auto *castExpr = dyn_cast<CastExpr>(e);
1004 if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
1005 return nullptr;
1006
1007 // If this is a decay from variable width array, bail out.
1008 const Expr *subExpr = castExpr->getSubExpr();
1009 if (subExpr->getType()->isVariableArrayType())
1010 return nullptr;
1011
1012 return subExpr;
1013}
1014
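/// If \p idx is produced by a constant op of integer type, return its integer
/// attribute; otherwise return a null attribute so the caller falls back to
/// the worst-case element alignment.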
1015static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
1016 // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr?
1017 if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
1018 return constantOp.getValueAttr<cir::IntAttr>();
1019 return {};
1020}
1021
1022static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
1023 CharUnits eltSize) {
1024 // If we have a constant index, we can use the exact offset of the
1025 // element we're accessing.
1026 if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
1027 const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
1028 return arrayAlign.alignmentAtOffset(offset);
1029 }
1030 // Otherwise, use the worst-case alignment for any element.
1031 return arrayAlign.alignmentOfArrayElement(eltSize);
1032}
1033
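/// Peel off nested variable-length array layers until the underlying
/// fixed-size element type is reached.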
1034 static QualType getFixedSizeElementType(const ASTContext &astContext,
1035                                         const VariableArrayType *vla) {
1036 QualType eltType;
1037 do {
1038 eltType = vla->getElementType();
1039 } while ((vla = astContext.getAsVariableArrayType(eltType)));
1040 return eltType;
1041}
1042
1043 static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf,
1044                                          mlir::Location beginLoc,
1045 mlir::Location endLoc, mlir::Value ptr,
1046 mlir::Type eltTy, mlir::Value idx,
1047 bool shouldDecay) {
1048 CIRGenModule &cgm = cgf.getCIRGenModule();
1049   // TODO(cir): LLVM codegen emits an inbounds GEP check here; is there
1050   // anything that would enhance tracking this later in CIR?
1052 return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
1053 shouldDecay);
1054}
1055
1056 static Address emitArraySubscriptPtr(CIRGenFunction &cgf,
1057                                      mlir::Location beginLoc,
1058 mlir::Location endLoc, Address addr,
1059 QualType eltType, mlir::Value idx,
1060 mlir::Location loc, bool shouldDecay) {
1061
1062 // Determine the element size of the statically-sized base. This is
1063 // the thing that the indices are expressed in terms of.
1064 if (const VariableArrayType *vla =
1065 cgf.getContext().getAsVariableArrayType(eltType)) {
1066 eltType = getFixedSizeElementType(cgf.getContext(), vla);
1067 }
1068
1069 // We can use that to compute the best alignment of the element.
1070 const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
1071 const CharUnits eltAlign =
1072 getArrayElementAlign(addr.getAlignment(), idx, eltSize);
1073
1075 const mlir::Value eltPtr =
1076 emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
1077 addr.getElementType(), idx, shouldDecay);
1078 const mlir::Type elementType = cgf.convertTypeForMem(eltType);
1079 return Address(eltPtr, elementType, eltAlign);
1080}
1081
1082LValue
1083 CIRGenFunction::emitArraySubscriptExpr(const ArraySubscriptExpr *e) {
1084   if (isa<ExtVectorElementExpr>(e->getBase())) {
1085     cgm.errorNYI(e->getSourceRange(),
1086 "emitArraySubscriptExpr: ExtVectorElementExpr");
1087     return LValue();
1088   }
1089
1090 if (getContext().getAsVariableArrayType(e->getType())) {
1091 cgm.errorNYI(e->getSourceRange(),
1092 "emitArraySubscriptExpr: VariableArrayType");
1093     return LValue();
1094   }
1095
1096 if (e->getType()->getAs<ObjCObjectType>()) {
1097 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
1098     return LValue();
1099   }
1100
1101 // The index must always be an integer, which is not an aggregate. Emit it
1102 // in lexical order (this complexity is, sadly, required by C++17).
1103 assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
1104 "index was neither LHS nor RHS");
1105
1106 auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
1107 const mlir::Value idx = emitScalarExpr(e->getIdx());
1108
1109 // Extend or truncate the index type to 32 or 64-bits.
1110 auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
1111 if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
1112 cgm.errorNYI(e->getSourceRange(),
1113 "emitArraySubscriptExpr: index type cast");
1114 return idx;
1115 };
1116
1117 // If the base is a vector type, then we are forming a vector element
1118 // with this subscript.
1119 if (e->getBase()->getType()->isVectorType() &&
1120       !isa<ExtVectorElementExpr>(e->getBase())) {
1121     const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
1122 const LValue lhs = emitLValue(e->getBase());
1123 return LValue::makeVectorElt(lhs.getAddress(), idx, e->getBase()->getType(),
1124 lhs.getBaseInfo());
1125 }
1126
1127 const mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
1128 if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
1129 LValue arrayLV;
1130 if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
1131 arrayLV = emitArraySubscriptExpr(ase);
1132 else
1133 arrayLV = emitLValue(array);
1134
1135 // Propagate the alignment from the array itself to the result.
1136 const Address addr = emitArraySubscriptPtr(
1137 *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
1138 arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1139 /*shouldDecay=*/true);
1140
1141 const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
1142
1143 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1144 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1145 }
1146
1147 return lv;
1148 }
1149
1150 // The base must be a pointer; emit it with an estimate of its alignment.
1151 assert(e->getBase()->getType()->isPointerType() &&
1152 "The base must be a pointer");
1153
1154 LValueBaseInfo eltBaseInfo;
1155 const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
1156 // Propagate the alignment from the array itself to the result.
1157   const Address addr = emitArraySubscriptPtr(
1158 *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
1159 e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1160 /*shouldDecay=*/false);
1161
1162   const LValue lv = LValue::makeAddr(addr, e->getType(), eltBaseInfo);
1163
1164 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1165 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1166 }
1167
1168 return lv;
1169}
1170
1171 LValue CIRGenFunction::emitExtVectorElementExpr(const ExtVectorElementExpr *e) {
1172   // Emit the base vector as an l-value.
1173 LValue base;
1174
1175 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
1176 if (e->isArrow()) {
1177 // If it is a pointer to a vector, emit the address and form an lvalue with
1178 // it.
1179 LValueBaseInfo baseInfo;
1180 Address ptr = emitPointerWithAlignment(e->getBase(), &baseInfo);
1181 const auto *clangPtrTy =
1182         e->getBase()->getType()->castAs<clang::PointerType>();
1183     base = makeAddrLValue(ptr, clangPtrTy->getPointeeType(), baseInfo);
1184 base.getQuals().removeObjCGCAttr();
1185 } else if (e->getBase()->isGLValue()) {
1186     // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
1187 // emit the base as an lvalue.
1188 assert(e->getBase()->getType()->isVectorType());
1189 base = emitLValue(e->getBase());
1190 } else {
1191 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
1192 assert(e->getBase()->getType()->isVectorType() &&
1193 "Result must be a vector");
1194 mlir::Value vec = emitScalarExpr(e->getBase());
1195
1196 // Store the vector to memory (because LValue wants an address).
1197 QualType baseTy = e->getBase()->getType();
1198 Address vecMem = createMemTemp(baseTy, vec.getLoc(), "tmp");
1199 if (!getLangOpts().HLSL && baseTy->isExtVectorBoolType()) {
1200 cgm.errorNYI(e->getSourceRange(),
1201 "emitExtVectorElementExpr: ExtVectorBoolType & !HLSL");
1202 return {};
1203 }
1204 builder.createStore(vec.getLoc(), vec, vecMem);
1205 base = makeAddrLValue(vecMem, baseTy, AlignmentSource::Decl);
1206 }
1207
1208 QualType type =
1209       e->getType().withCVRQualifiers(base.getQuals().getCVRQualifiers());
1210
1211 // Encode the element access list into a vector of unsigned indices.
1212   SmallVector<uint32_t, 4> indices;
1213   e->getEncodedElementAccess(indices);
1214
1215 if (base.isSimple()) {
1216 SmallVector<int64_t> attrElts(indices.begin(), indices.end());
1217 mlir::ArrayAttr elts = builder.getI64ArrayAttr(attrElts);
1218 return LValue::makeExtVectorElt(base.getAddress(), elts, type,
1219 base.getBaseInfo());
1220 }
1221
1222 cgm.errorNYI(e->getSourceRange(),
1223 "emitExtVectorElementExpr: isSimple is false");
1224 return {};
1225}
1226
1227 LValue CIRGenFunction::emitStringLiteralLValue(const StringLiteral *e,
1228                                                llvm::StringRef name) {
1229 cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e, name);
1230 assert(globalOp.getAlignment() && "expected alignment for string literal");
1231 unsigned align = *(globalOp.getAlignment());
1232 mlir::Value addr =
1233 builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
1234 return makeAddrLValue(
1235 Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
1236       e->getType(), AlignmentSource::Decl);
1237}
1238
1239/// Casts are never lvalues unless that cast is to a reference type. If the cast
1240/// is to a reference, we can have the usual lvalue result, otherwise if a cast
1241/// is needed by the code generator in an lvalue context, then it must mean that
1242/// we need the address of an aggregate in order to access one of its members.
1243/// This can happen for all the reasons that casts are permitted with aggregate
1244/// result, including noop aggregate casts, and cast from scalar to union.
1245 LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
1246   switch (e->getCastKind()) {
1247 case CK_ToVoid:
1248 case CK_BitCast:
1249 case CK_LValueToRValueBitCast:
1250 case CK_ArrayToPointerDecay:
1251 case CK_FunctionToPointerDecay:
1252 case CK_NullToMemberPointer:
1253 case CK_NullToPointer:
1254 case CK_IntegralToPointer:
1255 case CK_PointerToIntegral:
1256 case CK_PointerToBoolean:
1257 case CK_IntegralCast:
1258 case CK_BooleanToSignedIntegral:
1259 case CK_IntegralToBoolean:
1260 case CK_IntegralToFloating:
1261 case CK_FloatingToIntegral:
1262 case CK_FloatingToBoolean:
1263 case CK_FloatingCast:
1264 case CK_FloatingRealToComplex:
1265 case CK_FloatingComplexToReal:
1266 case CK_FloatingComplexToBoolean:
1267 case CK_FloatingComplexCast:
1268 case CK_FloatingComplexToIntegralComplex:
1269 case CK_IntegralRealToComplex:
1270 case CK_IntegralComplexToReal:
1271 case CK_IntegralComplexToBoolean:
1272 case CK_IntegralComplexCast:
1273 case CK_IntegralComplexToFloatingComplex:
1274 case CK_DerivedToBaseMemberPointer:
1275 case CK_BaseToDerivedMemberPointer:
1276 case CK_MemberPointerToBoolean:
1277 case CK_ReinterpretMemberPointer:
1278 case CK_AnyPointerToBlockPointerCast:
1279 case CK_ARCProduceObject:
1280 case CK_ARCConsumeObject:
1281 case CK_ARCReclaimReturnedObject:
1282 case CK_ARCExtendBlockObject:
1283 case CK_CopyAndAutoreleaseBlockObject:
1284 case CK_IntToOCLSampler:
1285 case CK_FloatingToFixedPoint:
1286 case CK_FixedPointToFloating:
1287 case CK_FixedPointCast:
1288 case CK_FixedPointToBoolean:
1289 case CK_FixedPointToIntegral:
1290 case CK_IntegralToFixedPoint:
1291 case CK_MatrixCast:
1292 case CK_HLSLVectorTruncation:
1293 case CK_HLSLArrayRValue:
1294 case CK_HLSLElementwiseCast:
1295 case CK_HLSLAggregateSplatCast:
1296 llvm_unreachable("unexpected cast lvalue");
1297
1298 case CK_Dependent:
1299 llvm_unreachable("dependent cast kind in IR gen!");
1300
1301 case CK_BuiltinFnToFnPtr:
1302 llvm_unreachable("builtin functions are handled elsewhere");
1303
1304 case CK_Dynamic: {
1305 LValue lv = emitLValue(e->getSubExpr());
1306 Address v = lv.getAddress();
1307 const auto *dce = cast<CXXDynamicCastExpr>(e);
1308     return makeNaturalAlignAddrLValue(emitDynamicCast(v, dce), e->getType());
1309   }
1310
1311 // These are never l-values; just use the aggregate emission code.
1312 case CK_NonAtomicToAtomic:
1313 case CK_AtomicToNonAtomic:
1314 case CK_ToUnion:
1315 case CK_ObjCObjectLValueCast:
1316 case CK_VectorSplat:
1317 case CK_ConstructorConversion:
1318 case CK_UserDefinedConversion:
1319 case CK_CPointerToObjCPointerCast:
1320 case CK_BlockPointerToObjCPointerCast:
1321 case CK_LValueToRValue: {
1322 cgm.errorNYI(e->getSourceRange(),
1323 std::string("emitCastLValue for unhandled cast kind: ") +
1324 e->getCastKindName());
1325
1326 return {};
1327 }
1328 case CK_AddressSpaceConversion: {
1329 LValue lv = emitLValue(e->getSubExpr());
1330 QualType destTy = getContext().getPointerType(e->getType());
1331
1332 clang::LangAS srcLangAS = e->getSubExpr()->getType().getAddressSpace();
1333 cir::TargetAddressSpaceAttr srcAS;
1334 if (clang::isTargetAddressSpace(srcLangAS))
1335 srcAS = cir::toCIRTargetAddressSpace(getMLIRContext(), srcLangAS);
1336 else
1337 cgm.errorNYI(
1338 e->getSourceRange(),
1339 "emitCastLValue: address space conversion from unknown address "
1340 "space");
1341
1342 mlir::Value v = getTargetHooks().performAddrSpaceCast(
1343 *this, lv.getPointer(), srcAS, convertType(destTy));
1344
1345     return makeAddrLValue(Address(v, convertTypeForMem(e->getType()),
1346                                   lv.getAddress().getAlignment()),
1347 e->getType(), lv.getBaseInfo());
1348 }
1349
1350 case CK_LValueBitCast: {
1351 // This must be a reinterpret_cast (or c-style equivalent).
1352 const auto *ce = cast<ExplicitCastExpr>(e);
1353
1354 cgm.emitExplicitCastExprType(ce, this);
1355 LValue LV = emitLValue(e->getSubExpr());
1356     Address V = LV.getAddress().withElementType(
1357         builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
1358
1359 return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
1360 }
1361
1362 case CK_NoOp: {
1363 // CK_NoOp can model a qualification conversion, which can remove an array
1364 // bound and change the IR type.
1365 LValue lv = emitLValue(e->getSubExpr());
1366 // Propagate the volatile qualifier to LValue, if exists in e.
1367     if (e->changesVolatileQualification())
1368       cgm.errorNYI(e->getSourceRange(),
1369 "emitCastLValue: NoOp changes volatile qual");
1370 if (lv.isSimple()) {
1371 Address v = lv.getAddress();
1372 if (v.isValid()) {
1373 mlir::Type ty = convertTypeForMem(e->getType());
1374 if (v.getElementType() != ty)
1375 cgm.errorNYI(e->getSourceRange(),
1376 "emitCastLValue: NoOp needs bitcast");
1377 }
1378 }
1379 return lv;
1380 }
1381
1382 case CK_UncheckedDerivedToBase:
1383 case CK_DerivedToBase: {
1384 auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();
1385
1386 LValue lv = emitLValue(e->getSubExpr());
1387 Address thisAddr = lv.getAddress();
1388
1389 // Perform the derived-to-base conversion
1390 Address baseAddr =
1391 getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
1392 /*NullCheckValue=*/false, e->getExprLoc());
1393
1394 // TODO: Support accesses to members of base classes in TBAA. For now, we
1395 // conservatively pretend that the complete object is of the base class
1396 // type.
1398 return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
1399 }
1400
1401 case CK_BaseToDerived: {
1402 const auto *derivedClassDecl = e->getType()->castAsCXXRecordDecl();
1403 LValue lv = emitLValue(e->getSubExpr());
1404
1405 // Perform the base-to-derived conversion
1406     Address derived = getAddressOfDerivedClass(
1407         getLoc(e->getSourceRange()), lv.getAddress(), derivedClassDecl,
1408 e->path(), /*NullCheckValue=*/false);
1409 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
1410 // performed and the object is not of the derived type.
1412
1414 return makeAddrLValue(derived, e->getType(), lv.getBaseInfo());
1415 }
1416
1417 case CK_ZeroToOCLOpaqueType:
1418 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
1419 }
1420
1421 llvm_unreachable("Invalid cast kind");
1422}
1423
1424 static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf,
1425                                                         const MemberExpr *me) {
1426 if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
1427 // Try to emit static variable member expressions as DREs.
1428 return DeclRefExpr::Create(
1429         cgf.getContext(), NestedNameSpecifierLoc(), SourceLocation(), vd,
1430         /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
1431 me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
1432 }
1433 return nullptr;
1434}
1435
1436 LValue CIRGenFunction::emitMemberExpr(const MemberExpr *e) {
1437   if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
1439 return emitDeclRefLValue(dre);
1440 }
1441
1442 Expr *baseExpr = e->getBase();
1443 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
1444 LValue baseLV;
1445 if (e->isArrow()) {
1446 LValueBaseInfo baseInfo;
1448 Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
1449 QualType ptrTy = baseExpr->getType()->getPointeeType();
1451 baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
1452 } else {
1454 baseLV = emitLValue(baseExpr);
1455 }
1456
1457 const NamedDecl *nd = e->getMemberDecl();
1458 if (auto *field = dyn_cast<FieldDecl>(nd)) {
1459 LValue lv = emitLValueForField(baseLV, field);
1461 if (getLangOpts().OpenMP) {
1462 // If the member was explicitly marked as nontemporal, mark it as
1463 // nontemporal. If the base lvalue is marked as nontemporal, mark access
1464 // to children as nontemporal too.
1465 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
1466 }
1467 return lv;
1468 }
1469
1470 if (isa<FunctionDecl>(nd)) {
1471 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: FunctionDecl");
1472 return LValue();
1473 }
1474
1475 llvm_unreachable("Unhandled member declaration!");
1476}
1477
1478/// Evaluate an expression into a given memory location.
1479 void CIRGenFunction::emitAnyExprToMem(const Expr *e, Address location,
1480                                       Qualifiers quals, bool isInit) {
1481 // FIXME: This function should take an LValue as an argument.
1482 switch (getEvaluationKind(e->getType())) {
1483 case cir::TEK_Complex: {
1484 LValue lv = makeAddrLValue(location, e->getType());
1485 emitComplexExprIntoLValue(e, lv, isInit);
1486 return;
1487 }
1488
1489 case cir::TEK_Aggregate: {
1490 emitAggExpr(e, AggValueSlot::forAddr(location, quals,
1494 return;
1495 }
1496
1497 case cir::TEK_Scalar: {
1498     RValue rv = RValue::get(emitScalarExpr(e));
1499     LValue lv = makeAddrLValue(location, e->getType());
1500 emitStoreThroughLValue(rv, lv);
1501 return;
1502 }
1503 }
1504
1505 llvm_unreachable("bad evaluation kind");
1506}
1507
1508 static Address createReferenceTemporary(CIRGenFunction &cgf,
1509                                         const MaterializeTemporaryExpr *m,
1510 const Expr *inner) {
1511 // TODO(cir): cgf.getTargetHooks();
1512 switch (m->getStorageDuration()) {
1513 case SD_FullExpression:
1514 case SD_Automatic: {
1515 QualType ty = inner->getType();
1516
1518
1519 // The temporary memory should be created in the same scope as the extending
1520 // declaration of the temporary materialization expression.
1521 cir::AllocaOp extDeclAlloca;
1522 if (const ValueDecl *extDecl = m->getExtendingDecl()) {
1523 auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
1524 if (extDeclAddrIter != cgf.localDeclMap.end())
1525 extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
1526 }
1527 mlir::OpBuilder::InsertPoint ip;
1528 if (extDeclAlloca)
1529 ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
1530 return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
1531 cgf.getCounterRefTmpAsString(), /*alloca=*/nullptr,
1532 ip);
1533 }
1534 case SD_Thread:
1535 case SD_Static: {
1536 cgf.cgm.errorNYI(
1537 m->getSourceRange(),
1538 "createReferenceTemporary: static/thread storage duration");
1539 return Address::invalid();
1540 }
1541
1542 case SD_Dynamic:
1543 llvm_unreachable("temporary can't have dynamic storage duration");
1544 }
1545 llvm_unreachable("unknown storage duration");
1546}
1547
1548 static void pushTemporaryCleanup(CIRGenFunction &cgf,
1549                                  const MaterializeTemporaryExpr *m,
1550 const Expr *e, Address referenceTemporary) {
1551 // Objective-C++ ARC:
1552 // If we are binding a reference to a temporary that has ownership, we
1553 // need to perform retain/release operations on the temporary.
1554 //
1555 // FIXME(ogcg): This should be looking at e, not m.
1556 if (m->getType().getObjCLifetime()) {
1557 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
1558 return;
1559 }
1560
1561   QualType::DestructionKind dk = e->getType().isDestructedType();
1562   if (dk == QualType::DK_none)
1563 return;
1564
1565 switch (m->getStorageDuration()) {
1566 case SD_Static:
1567 case SD_Thread: {
1568 CXXDestructorDecl *referenceTemporaryDtor = nullptr;
1569 if (const auto *classDecl =
1571 classDecl && !classDecl->hasTrivialDestructor())
1572 // Get the destructor for the reference temporary.
1573 referenceTemporaryDtor = classDecl->getDestructor();
1574
1575 if (!referenceTemporaryDtor)
1576 return;
1577
1578 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
1579 "storage duration with destructors");
1580 break;
1581 }
1582
1583 case SD_FullExpression:
1584 cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
1586 break;
1587
1588 case SD_Automatic:
1589 cgf.cgm.errorNYI(e->getSourceRange(),
1590 "pushTemporaryCleanup: automatic storage duration");
1591 break;
1592
1593 case SD_Dynamic:
1594 llvm_unreachable("temporary cannot have dynamic storage duration");
1595 }
1596}
1597
1598 LValue CIRGenFunction::emitMaterializeTemporaryExpr(
1599     const MaterializeTemporaryExpr *m) {
1600 const Expr *e = m->getSubExpr();
1601
1602 assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
1603 !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
1604 "Reference should never be pseudo-strong!");
1605
1606 // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so
1607 // as that will cause the lifetime adjustment to be lost for ARC
1608 auto ownership = m->getType().getObjCLifetime();
1609 if (ownership != Qualifiers::OCL_None &&
1610 ownership != Qualifiers::OCL_ExplicitNone) {
1611 cgm.errorNYI(e->getSourceRange(),
1612 "emitMaterializeTemporaryExpr: ObjCLifetime");
1613 return {};
1614 }
1615   SmallVector<const Expr *, 2> commaLHSs;
1616   SmallVector<SubobjectAdjustment, 2> adjustments;
1617
1618 e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);
1619
1620 for (const Expr *ignored : commaLHSs)
1621 emitIgnoredExpr(ignored);
1622
1623 if (isa<OpaqueValueExpr>(e)) {
1624 cgm.errorNYI(e->getSourceRange(),
1625 "emitMaterializeTemporaryExpr: OpaqueValueExpr");
1626 return {};
1627 }
1628
1629 // Create and initialize the reference temporary.
1630 Address object = createReferenceTemporary(*this, m, e);
1631
1632 if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
1633 // TODO(cir): add something akin to stripPointerCasts() to ptr above
1634 cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
1635 return {};
1636 } else {
1638 emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
1639 }
1640 pushTemporaryCleanup(*this, m, e, object);
1641
1642 // Perform derived-to-base casts and/or field accesses, to get from the
1643 // temporary object we created (and, potentially, for which we extended
1644 // the lifetime) to the subobject we're binding the reference to.
1645 if (!adjustments.empty()) {
1646 cgm.errorNYI(e->getSourceRange(),
1647 "emitMaterializeTemporaryExpr: Adjustments");
1648 return {};
1649 }
1650
1651 return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
1652}
1653
1654LValue
1655 CIRGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
1656
1658 auto it = opaqueLValues.find(e);
1659 if (it != opaqueLValues.end())
1660 return it->second;
1661
1662 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
1663 return emitLValue(e->getSourceExpr());
1664}
1665
1666RValue
1667 CIRGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
1668
1670 auto it = opaqueRValues.find(e);
1671 if (it != opaqueRValues.end())
1672 return it->second;
1673
1674 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
1675 return emitAnyExpr(e->getSourceExpr());
1676}
1677
1678 LValue CIRGenFunction::emitCompoundLiteralLValue(const CompoundLiteralExpr *e) {
1679   if (e->isFileScope()) {
1680 cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
1681 return {};
1682 }
1683
1684 if (e->getType()->isVariablyModifiedType()) {
1685 cgm.errorNYI(e->getSourceRange(),
1686 "emitCompoundLiteralLValue: VariablyModifiedType");
1687 return {};
1688 }
1689
1690 Address declPtr = createMemTemp(e->getType(), getLoc(e->getSourceRange()),
1691 ".compoundliteral");
1692 const Expr *initExpr = e->getInitializer();
1693 LValue result = makeAddrLValue(declPtr, e->getType(), AlignmentSource::Decl);
1694
1695 emitAnyExprToMem(initExpr, declPtr, e->getType().getQualifiers(),
1696 /*Init*/ true);
1697
1698 // Block-scope compound literals are destroyed at the end of the enclosing
1699 // scope in C.
1700 if (!getLangOpts().CPlusPlus && e->getType().isDestructedType()) {
1701 cgm.errorNYI(e->getSourceRange(),
1702 "emitCompoundLiteralLValue: non C++ DestructedType");
1703 return {};
1704 }
1705
1706 return result;
1707}
1708
1709 LValue CIRGenFunction::emitCallExprLValue(const CallExpr *e) {
1710   RValue rv = emitCallExpr(e);
1711
1712 if (!rv.isScalar()) {
1713 cgm.errorNYI(e->getSourceRange(), "emitCallExprLValue: non-scalar return");
1714 return {};
1715 }
1716
1717 assert(e->getCallReturnType(getContext())->isReferenceType() &&
1718 "Can't have a scalar return unless the return type is a "
1719 "reference type!");
1720
1721   return makeNaturalAlignPointeeAddrLValue(rv.getValue(), e->getType());
1722}
1723
1724 LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
1725   // Comma expressions just emit their LHS then their RHS as an l-value.
1726 if (e->getOpcode() == BO_Comma) {
1727 emitIgnoredExpr(e->getLHS());
1728 return emitLValue(e->getRHS());
1729 }
1730
1731 if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) {
1732 cgm.errorNYI(e->getSourceRange(), "member pointers");
1733 return {};
1734 }
1735
1736 assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");
1737
1738 // Note that in all of these cases, __block variables need the RHS
1739 // evaluated first just in case the variable gets moved by the RHS.
1740
1741   switch (getEvaluationKind(e->getType())) {
1742   case cir::TEK_Scalar: {
1744 if (e->getLHS()->getType().getObjCLifetime() !=
1746 cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
1747 return {};
1748 }
1749
1750 RValue rv = emitAnyExpr(e->getRHS());
1751 LValue lv = emitLValue(e->getLHS());
1752
1753 SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
1754 if (lv.isBitField())
1755 emitStoreThroughBitfieldLValue(rv, lv);
1756 else
1757 emitStoreThroughLValue(rv, lv);
1758
1759 if (getLangOpts().OpenMP) {
1760 cgm.errorNYI(e->getSourceRange(), "openmp");
1761 return {};
1762 }
1763
1764 return lv;
1765 }
1766
1767 case cir::TEK_Complex: {
1768 return emitComplexAssignmentLValue(e);
1769 }
1770
1771 case cir::TEK_Aggregate:
1772 cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
1773 return {};
1774 }
1775 llvm_unreachable("bad evaluation kind");
1776}
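// For illustration, both C++ forms below produce l-values handled here:
//   int a = 0, b = 0;
//   (a, b) = 1;   // BO_Comma: LHS emitted for side effects, RHS as l-value
//   (a = 2) = 3;  // BO_Assign: RHS evaluated, then stored through the LHS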
1777
1778/// Emit code to compute the specified expression which
1779/// can have any type. The result is returned as an RValue struct.
1780RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot,
1781 bool ignoreResult) {
1782 switch (getEvaluationKind(e->getType())) {
1783 case cir::TEK_Scalar:
1784 return RValue::get(emitScalarExpr(e, ignoreResult));
1785 case cir::TEK_Complex:
1786 return RValue::getComplex(emitComplexExpr(e));
1787 case cir::TEK_Aggregate: {
1788 if (!ignoreResult && aggSlot.isIgnored())
1789 aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
1791 emitAggExpr(e, aggSlot);
1792 return aggSlot.asRValue();
1793 }
1794 }
1795 llvm_unreachable("bad evaluation kind");
1796}
1797
1798// Detect the unusual situation where an inline version is shadowed by a
1799// non-inline version. In that case we should pick the external one
1800// everywhere. That's GCC behavior too.
1801static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd) {
1802 for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
1803 if (!pd->isInlineBuiltinDeclaration())
1804 return false;
1805 return true;
1806}
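// For illustration, a C declaration in the style used by glibc fortification,
//   extern inline __attribute__((gnu_inline, always_inline)) void *
//   memcpy(void *d, const void *s, unsigned long n) {
//     return __builtin_memcpy(d, s, n);
//   }
// is the kind of inline builtin declaration detected here; calls to it are
// redirected below to a clone named along the lines of "memcpy.inline" so the
// wrapper is not mistaken for the real builtin.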
1807
1808CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
1809 const auto *fd = cast<FunctionDecl>(gd.getDecl());
1810
1811 if (unsigned builtinID = fd->getBuiltinID()) {
1812 StringRef ident = cgm.getMangledName(gd);
1813 std::string fdInlineName = (ident + ".inline").str();
1814
1815 bool isPredefinedLibFunction =
1816 cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
1817 // Assume nobuiltins everywhere until we actually read the attributes.
1818 bool hasAttributeNoBuiltin = true;
1820
1821 // When directly calling an inline builtin, call it through its mangled
1822 // name to make it clear it's not the actual builtin.
1823 auto fn = cast<cir::FuncOp>(curFn);
1824 if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
1825 cir::FuncOp clone =
1826 mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
1827
1828 if (!clone) {
1829 // Create a forward declaration - the body will be generated in
1830 // generateCode when the function definition is processed
1831 cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
1832 mlir::OpBuilder::InsertionGuard guard(builder);
1833 builder.setInsertionPointToStart(cgm.getModule().getBody());
1834
1835 clone = cir::FuncOp::create(builder, calleeFunc.getLoc(), fdInlineName,
1836 calleeFunc.getFunctionType());
1837 clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
1838 &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
1839 clone.setSymVisibility("private");
1840 clone.setInlineKindAttr(cir::InlineAttr::get(
1841 &cgm.getMLIRContext(), cir::InlineKind::AlwaysInline));
1842 }
1843 return CIRGenCallee::forDirect(clone, gd);
1844 }
1845
1846 // Replaceable builtins provide their own implementation of a builtin. If we
1847 // are in an inline builtin implementation, avoid trivial infinite
1848 // recursion. Honor __attribute__((no_builtin("foo"))) or
1849 // __attribute__((no_builtin)) on the current function, unless foo is not a
1850 // predefined library function, in which case we must generate the builtin
1851 // no matter what.
1852 else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
1853 return CIRGenCallee::forBuiltin(builtinID, fd);
1854 }
1855
1856 cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);
1857
1858 assert(!cir::MissingFeatures::hip());
1859
1860 return CIRGenCallee::forDirect(callee, gd);
1861}
1862
1863RValue CIRGenFunction::getUndefRValue(QualType ty) {
1864 if (ty->isVoidType())
1865 return RValue::get(nullptr);
1866
1867 cgm.errorNYI("unsupported type for undef rvalue");
1868 return RValue::get(nullptr);
1869}
1870
1871RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
1872 const CIRGenCallee &origCallee,
1873 const clang::CallExpr *e,
1874 ReturnValueSlot returnValue) {
1875 // Get the actual function type. The callee type will always be a pointer to
1876 // function type or a block pointer type.
1877 assert(calleeTy->isFunctionPointerType() &&
1878 "Callee must have function pointer type!");
1879
1880 calleeTy = getContext().getCanonicalType(calleeTy);
1881 auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
1882
1883 CIRGenCallee callee = origCallee;
1884
1885 if (getLangOpts().CPlusPlus)
1887
1888 const auto *fnType = cast<FunctionType>(pointeeTy);
1889
1891
1892 CallArgList args;
1894
1895 emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
1896 e->getDirectCallee());
1897
1898 const CIRGenFunctionInfo &funcInfo =
1899 cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
1900
1901 // C99 6.5.2.2p6:
1902 // If the expression that denotes the called function has a type that does
1903 // not include a prototype, [the default argument promotions are performed].
1904 // If the number of arguments does not equal the number of parameters, the
1905 // behavior is undefined. If the function is defined with a type that
1906 // includes a prototype, and either the prototype ends with an ellipsis (,
1907 // ...) or the types of the arguments after promotion are not compatible
1908 // with the types of the parameters, the behavior is undefined. If the
1909 // function is defined with a type that does not include a prototype, and
1910 // the types of the arguments after promotion are not compatible with those
1911 // of the parameters after promotion, the behavior is undefined [except in
1912 // some trivial cases].
1913 // That is, in the general case, we should assume that a call through an
1914 // unprototyped function type works like a *non-variadic* call. The way we
1915 // make this work is to cast to the exact type of the promoted arguments.
1916 if (isa<FunctionNoProtoType>(fnType)) {
1919 cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
1920 // get non-variadic function type
1921 calleeTy = cir::FuncType::get(calleeTy.getInputs(),
1922 calleeTy.getReturnType(), false);
1923 auto calleePtrTy = cir::PointerType::get(calleeTy);
1924
1925 mlir::Operation *fn = callee.getFunctionPointer();
1926 mlir::Value addr;
1927 if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
1928 addr = cir::GetGlobalOp::create(
1929 builder, getLoc(e->getSourceRange()),
1930 cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
1931 } else {
1932 addr = fn->getResult(0);
1933 }
1934
1935 fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
1936 callee.setFunctionPointer(fn);
1937 }
1938
1940 assert(!cir::MissingFeatures::hip());
1942
1943 cir::CIRCallOpInterface callOp;
1944 RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
1945 getLoc(e->getExprLoc()));
1946
1948
1949 return callResult;
1950}
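// For illustration, a C call through an unprototyped function,
//   void f();                   /* no prototype (C17 and earlier) */
//   void g(void) { f(1, 2.0); }
// is emitted per the C99 rule quoted above: the callee is bitcast to the exact
// non-variadic type of the promoted arguments (here "void (int, double)")
// instead of being treated as a variadic call.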
1951
1953 e = e->IgnoreParens();
1954
1955 // Look through function-to-pointer decay.
1956 if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
1957 if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
1958 implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
1959 return emitCallee(implicitCast->getSubExpr());
1960 }
1961 // When performing an indirect call through a function pointer lvalue, the
1962 // function pointer lvalue is implicitly converted to an rvalue through an
1963 // lvalue-to-rvalue conversion.
1964 assert(implicitCast->getCastKind() == CK_LValueToRValue &&
1965 "unexpected implicit cast on function pointers");
1966 } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
1967 // Resolve direct calls.
1968 const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
1969 return emitDirectCallee(funcDecl);
1970 } else if (auto me = dyn_cast<MemberExpr>(e)) {
1971 if (const auto *fd = dyn_cast<FunctionDecl>(me->getMemberDecl())) {
1972 emitIgnoredExpr(me->getBase());
1973 return emitDirectCallee(fd);
1974 }
1975 // Else fall through to the indirect reference handling below.
1976 } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
1977 return CIRGenCallee::forPseudoDestructor(pde);
1978 }
1979
1980 // Otherwise, we have an indirect reference.
1981 mlir::Value calleePtr;
1982 QualType functionType;
1983 if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
1984 calleePtr = emitScalarExpr(e);
1985 functionType = ptrType->getPointeeType();
1986 } else {
1987 functionType = e->getType();
1988 calleePtr = emitLValue(e).getPointer();
1989 }
1990 assert(functionType->isFunctionType());
1991
1992 GlobalDecl gd;
1993 if (const auto *vd =
1994 dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
1995 gd = GlobalDecl(vd);
1996
1997 CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
1998 CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
1999 return callee;
2000}
2001
2002RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *e,
2003 ReturnValueSlot returnValue) {
2005
2006 if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
2007 return emitCXXMemberCallExpr(ce, returnValue);
2008
2009 if (isa<CUDAKernelCallExpr>(e)) {
2010 cgm.errorNYI(e->getSourceRange(), "call to CUDA kernel");
2011 return RValue::get(nullptr);
2012 }
2013
2014 if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
2015 // If the callee decl is a CXXMethodDecl, we need to emit this as a C++
2016 // operator member call.
2017 if (const CXXMethodDecl *md =
2018 dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
2019 return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
2020 // A CXXOperatorCallExpr is created even for explicit object methods, but
2021 // these should be treated like static function calls. Fall through to do
2022 // that.
2023 }
2024
2025 CIRGenCallee callee = emitCallee(e->getCallee());
2026
2027 if (callee.isBuiltin())
2028 return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
2029 returnValue);
2030
2031 if (callee.isPseudoDestructor())
2032 return emitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
2033
2034 return emitCall(e->getCallee()->getType(), callee, e, returnValue);
2035}
2036
2037/// Emit code to compute the specified expression, ignoring the result.
2039 if (e->isPRValue()) {
2040 emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
2041 return;
2042 }
2043
2044 // Just emit it as an l-value and drop the result.
2045 emitLValue(e);
2046}
2047
2048Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e,
2049 LValueBaseInfo *baseInfo) {
2051 assert(e->getType()->isArrayType() &&
2052 "Array to pointer decay must have array source type!");
2053
2054 // Expressions of array type can't be bitfields or vector elements.
2055 LValue lv = emitLValue(e);
2056 Address addr = lv.getAddress();
2057
2058 // If the array type was an incomplete type, we need to make sure
2059 // the decay ends up being the right type.
2060 auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
2061
2062 if (e->getType()->isVariableArrayType())
2063 return addr;
2064
2065 [[maybe_unused]] auto pointeeTy =
2066 mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
2067
2068 [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
2069 assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
2070 assert(pointeeTy == arrayTy);
2071
2072 // The result of this decay conversion points to an array element within the
2073 // base lvalue. However, since TBAA currently does not support representing
2074 // accesses to elements of member arrays, we conservatively represent accesses
2075 // to the pointee object as if it had no base lvalue specified.
2076 // TODO: Support TBAA for member arrays.
2077 QualType eltType = e->getType()->castAsArrayTypeUnsafe()->getElementType();
2079
2080 mlir::Value ptr = builder.maybeBuildArrayDecay(
2081 cgm.getLoc(e->getSourceRange()), addr.getPointer(),
2082 convertTypeForMem(eltType));
2083 return Address(ptr, addr.getAlignment());
2084}
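// For illustration, the decay handled here is the one in
//   int arr[4];
//   int *p = arr;   // array-to-pointer decay
// where the produced pointer addresses arr's first element and reuses the
// array l-value's alignment.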
2085
2086/// Given the address of a temporary variable, produce an r-value of its type.
2087RValue CIRGenFunction::convertTempToRValue(Address addr, clang::QualType type,
2088 clang::SourceLocation loc) {
2089 LValue lvalue = makeAddrLValue(addr, type, AlignmentSource::Decl);
2090 switch (getEvaluationKind(type)) {
2091 case cir::TEK_Complex:
2092 return RValue::getComplex(emitLoadOfComplex(lvalue, loc));
2093 case cir::TEK_Aggregate:
2094 cgm.errorNYI(loc, "convertTempToRValue: aggregate type");
2095 return RValue::get(nullptr);
2096 case cir::TEK_Scalar:
2097 return RValue::get(emitLoadOfScalar(lvalue, loc));
2098 }
2099 llvm_unreachable("bad evaluation kind");
2100}
2101
2102/// Emit an `if` on a boolean condition, filling `then` and `else` into
2103/// appropriate regions.
2104mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
2105 const Stmt *thenS,
2106 const Stmt *elseS) {
2107 mlir::Location thenLoc = getLoc(thenS->getSourceRange());
2108 std::optional<mlir::Location> elseLoc;
2109 if (elseS)
2110 elseLoc = getLoc(elseS->getSourceRange());
2111
2112 mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
2114 cond, /*thenBuilder=*/
2115 [&](mlir::OpBuilder &, mlir::Location) {
2116 LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
2117 resThen = emitStmt(thenS, /*useCurrentScope=*/true);
2118 },
2119 thenLoc,
2120 /*elseBuilder=*/
2121 [&](mlir::OpBuilder &, mlir::Location) {
2122 assert(elseLoc && "Invalid location for elseS.");
2123 LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
2124 resElse = emitStmt(elseS, /*useCurrentScope=*/true);
2125 },
2126 elseLoc);
2127
2128 return mlir::LogicalResult::success(resThen.succeeded() &&
2129 resElse.succeeded());
2130}
2131
2132/// Emit an `if` on a boolean condition, filling `then` and `else` into
2133/// appropriate regions.
2134cir::IfOp CIRGenFunction::emitIfOnBoolExpr(
2135 const clang::Expr *cond, BuilderCallbackRef thenBuilder,
2136 mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
2137 std::optional<mlir::Location> elseLoc) {
2138 // Attempt to be as accurate as possible with IfOp location, generate
2139 // one fused location that has either 2 or 4 total locations, depending
2140 // on else's availability.
2141 SmallVector<mlir::Location, 2> ifLocs{thenLoc};
2142 if (elseLoc)
2143 ifLocs.push_back(*elseLoc);
2144 mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);
2145
2146 // Emit the code with the fully general case.
2147 mlir::Value condV = emitOpOnBoolExpr(loc, cond);
2148 return cir::IfOp::create(builder, loc, condV, elseLoc.has_value(),
2149 /*thenBuilder=*/thenBuilder,
2150 /*elseBuilder=*/elseBuilder);
2151}
2152
2153/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
2154mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
2155 const Expr *cond) {
2158 cond = cond->IgnoreParens();
2159
2160 // In LLVM the condition is reversed here for efficient codegen.
2161 // This should be done in CIR prior to LLVM lowering, if we do now
2162 // we can make CIR based diagnostics misleading.
2163 // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
2165
2166 if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
2167 Expr *trueExpr = condOp->getTrueExpr();
2168 Expr *falseExpr = condOp->getFalseExpr();
2169 mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());
2170
2171 mlir::Value ternaryOpRes =
2172 cir::TernaryOp::create(
2173 builder, loc, condV, /*thenBuilder=*/
2174 [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
2175 mlir::Value lhs = emitScalarExpr(trueExpr);
2176 cir::YieldOp::create(b, loc, lhs);
2177 },
2178 /*elseBuilder=*/
2179 [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
2180 mlir::Value rhs = emitScalarExpr(falseExpr);
2181 cir::YieldOp::create(b, loc, rhs);
2182 })
2183 .getResult();
2184
2185 return emitScalarConversion(ternaryOpRes, condOp->getType(),
2186 getContext().BoolTy, condOp->getExprLoc());
2187 }
2188
2189 if (isa<CXXThrowExpr>(cond)) {
2190 cgm.errorNYI("NYI");
2191 return createDummyValue(loc, cond->getType());
2192 }
2193
2194 // If the branch has a condition wrapped by __builtin_unpredictable,
2195 // create metadata that specifies that the branch is unpredictable.
2196 // Don't bother if not optimizing because that metadata would not be used.
2198
2199 // Emit the code with the fully general case.
2200 return evaluateExprAsBool(cond);
2201}
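// For illustration, a condition such as
//   if (flag ? x : y) { /* ... */ }
// goes through the ConditionalOperator path above: a cir.ternary yields the
// selected operand, and its result is converted to a boolean rather than
// emitting two separate branches.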
2202
2203mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2204 mlir::Location loc, CharUnits alignment,
2205 bool insertIntoFnEntryBlock,
2206 mlir::Value arraySize) {
2207 mlir::Block *entryBlock = insertIntoFnEntryBlock
2208 ? getCurFunctionEntryBlock()
2209 : curLexScope->getEntryBlock();
2210
2211 // If this is an alloca in the entry basic block of a cir.try and there's
2212 // a surrounding cir.scope, make sure the alloca ends up in the surrounding
2213 // scope instead. This is necessary in order to guarantee all SSA values are
2214 // reachable during cleanups.
2215 if (auto tryOp =
2216 llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
2217 if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
2218 entryBlock = &scopeOp.getScopeRegion().front();
2219 }
2220
2221 return emitAlloca(name, ty, loc, alignment,
2222 builder.getBestAllocaInsertPoint(entryBlock), arraySize);
2223}
2224
2225mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2226 mlir::Location loc, CharUnits alignment,
2227 mlir::OpBuilder::InsertPoint ip,
2228 mlir::Value arraySize) {
2229 // CIR uses its own alloca address space rather than follow the target data
2230 // layout like original CodeGen. The data layout awareness should be done in
2231 // the lowering pass instead.
2232 cir::PointerType localVarPtrTy =
2234 mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);
2235
2236 mlir::Value addr;
2237 {
2238 mlir::OpBuilder::InsertionGuard guard(builder);
2239 builder.restoreInsertionPoint(ip);
2240 addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
2241 /*var type*/ ty, name, alignIntAttr, arraySize);
2243 }
2244 return addr;
2245}
2246
2247// Note: this function also emits constructor calls to support an MSVC
2248// extension allowing explicit constructor function calls.
2249RValue CIRGenFunction::emitCXXMemberCallExpr(const CXXMemberCallExpr *ce,
2250 ReturnValueSlot returnValue) {
2251 const Expr *callee = ce->getCallee()->IgnoreParens();
2252
2253 if (isa<BinaryOperator>(callee)) {
2254 cgm.errorNYI(ce->getSourceRange(),
2255 "emitCXXMemberCallExpr: C++ binary operator");
2256 return RValue::get(nullptr);
2257 }
2258
2259 const auto *me = cast<MemberExpr>(callee);
2260 const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());
2261
2262 if (md->isStatic()) {
2263 cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
2264 return RValue::get(nullptr);
2265 }
2266
2267 bool hasQualifier = me->hasQualifier();
2268 NestedNameSpecifier qualifier = me->getQualifier();
2269 bool isArrow = me->isArrow();
2270 const Expr *base = me->getBase();
2271
2272 return emitCXXMemberOrOperatorMemberCallExpr(
2273 ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
2274}
2275
2276RValue CIRGenFunction::emitReferenceBindingToExpr(const Expr *e) {
2277 // Emit the expression as an lvalue.
2278 LValue lv = emitLValue(e);
2279 assert(lv.isSimple());
2280 mlir::Value value = lv.getPointer();
2281
2283
2284 return RValue::get(value);
2285}
2286
2287Address CIRGenFunction::emitLoadOfReference(LValue refLVal, mlir::Location loc,
2288 LValueBaseInfo *pointeeBaseInfo) {
2289 if (refLVal.isVolatile())
2290 cgm.errorNYI(loc, "load of volatile reference");
2291
2292 cir::LoadOp load =
2293 cir::LoadOp::create(builder, loc, refLVal.getAddress().getElementType(),
2294 refLVal.getAddress().getPointer());
2295
2297
2298 QualType pointeeType = refLVal.getType()->getPointeeType();
2299 CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
2300 return Address(load, convertTypeForMem(pointeeType), align);
2301}
2302
2303LValue CIRGenFunction::emitLoadOfReferenceLValue(Address refAddr,
2304 mlir::Location loc,
2305 QualType refTy,
2306 AlignmentSource source) {
2307 LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
2308 LValueBaseInfo pointeeBaseInfo;
2310 Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
2311 return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
2312 pointeeBaseInfo);
2313}
2314
2315void CIRGenFunction::emitTrap(mlir::Location loc, bool createNewBlock) {
2316 cir::TrapOp::create(builder, loc);
2317 if (createNewBlock)
2318 builder.createBlock(builder.getBlock()->getParent());
2319}
2320
2321void CIRGenFunction::emitUnreachable(SourceLocation loc,
2322 bool createNewBlock) {
2324 cir::UnreachableOp::create(builder, getLoc(loc));
2325 if (createNewBlock)
2326 builder.createBlock(builder.getBlock()->getParent());
2327}
2328
2329mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
2330 clang::QualType qt) {
2331 mlir::Type t = convertType(qt);
2332 CharUnits alignment = getContext().getTypeAlignInChars(qt);
2333 return builder.createDummyValue(loc, t, alignment);
2334}
2335
2336//===----------------------------------------------------------------------===//
2337// CIR builder helpers
2338//===----------------------------------------------------------------------===//
2339
2340Address CIRGenFunction::createMemTemp(QualType ty, mlir::Location loc,
2341 const Twine &name, Address *alloca,
2342 mlir::OpBuilder::InsertPoint ip) {
2343 // FIXME: Should we prefer the preferred type alignment here?
2344 return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
2345 alloca, ip);
2346}
2347
2348Address CIRGenFunction::createMemTemp(QualType ty, CharUnits align,
2349 mlir::Location loc, const Twine &name,
2350 Address *alloca,
2351 mlir::OpBuilder::InsertPoint ip) {
2352 Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
2353 /*ArraySize=*/nullptr, alloca, ip);
2354 if (ty->isConstantMatrixType()) {
2356 cgm.errorNYI(loc, "temporary matrix value");
2357 }
2358 return result;
2359}
2360
2361/// This creates an alloca and inserts it into the entry block of the
2362/// current region.
2363Address CIRGenFunction::createTempAllocaWithoutCast(
2364 mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
2365 mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
2366 cir::AllocaOp alloca = ip.isSet()
2367 ? createTempAlloca(ty, loc, name, ip, arraySize)
2368 : createTempAlloca(ty, loc, name, arraySize);
2369 alloca.setAlignmentAttr(cgm.getSize(align));
2370 return Address(alloca, ty, align);
2371}
2372
2373/// This creates an alloca and inserts it into the entry block. The alloca is
2374/// cast to the default address space if necessary.
2375// TODO(cir): Implement address space casting to match classic codegen's
2376// CreateTempAlloca behavior with DestLangAS parameter
2377Address CIRGenFunction::createTempAlloca(mlir::Type ty, CharUnits align,
2378 mlir::Location loc, const Twine &name,
2379 mlir::Value arraySize,
2380 Address *allocaAddr,
2381 mlir::OpBuilder::InsertPoint ip) {
2382 Address alloca =
2383 createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
2384 if (allocaAddr)
2385 *allocaAddr = alloca;
2386 mlir::Value v = alloca.getPointer();
2387 // Alloca always returns a pointer in alloca address space, which may
2388 // be different from the type defined by the language. For example,
2389 // in C++ the auto variables are in the default address space. Therefore
2390 // cast alloca to the default address space when necessary.
2391
2392 LangAS allocaAS = alloca.getAddressSpace()
2394 alloca.getAddressSpace().getValue().getUInt())
2399 getCIRAllocaAddressSpace().getValue().getUInt());
2400 }
2401
2402 if (dstTyAS != allocaAS) {
2404 builder.getPointerTo(ty, dstTyAS));
2405 }
2406 return Address(v, ty, align);
2407}
2408
2409/// This creates an alloca and inserts it into the entry block if \p ArraySize
2410/// is nullptr, otherwise inserts it at the current insertion point of the
2411/// builder.
2412cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2413 mlir::Location loc,
2414 const Twine &name,
2415 mlir::Value arraySize,
2416 bool insertIntoFnEntryBlock) {
2417 return mlir::cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
2418 insertIntoFnEntryBlock, arraySize)
2419 .getDefiningOp());
2420}
2421
2422/// This creates an alloca and inserts it into the provided insertion point
2423cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2424 mlir::Location loc,
2425 const Twine &name,
2426 mlir::OpBuilder::InsertPoint ip,
2427 mlir::Value arraySize) {
2428 assert(ip.isSet() && "Insertion point is not set");
2429 return mlir::cast<cir::AllocaOp>(
2430 emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
2431 .getDefiningOp());
2432}
2433
2434/// Try to emit a reference to the given value without producing it as
2435/// an l-value. For many cases, this is just an optimization, but it avoids
2436/// us needing to emit global copies of variables if they're named without
2437/// triggering a formal use in a context where we can't emit a direct
2438/// reference to them, for instance if a block or lambda or a member of a
2439/// local class uses a const int variable or constexpr variable from an
2440/// enclosing function.
2441///
2442/// For named members of enums, this is the only way they are emitted.
2443CIRGenFunction::ConstantEmission
2444CIRGenFunction::tryEmitAsConstant(const DeclRefExpr *refExpr) {
2445 const ValueDecl *value = refExpr->getDecl();
2446
2447 // There is a lot more to do here, but for now only EnumConstantDecl is
2448 // supported.
2450
2451 // The value needs to be an enum constant or a constant variable.
2452 if (!isa<EnumConstantDecl>(value))
2453 return ConstantEmission();
2454
2455 Expr::EvalResult result;
2456 if (!refExpr->EvaluateAsRValue(result, getContext()))
2457 return ConstantEmission();
2458
2459 QualType resultType = refExpr->getType();
2460
2461 // As long as we're only handling EnumConstantDecl, there should be no
2462 // side-effects.
2463 assert(!result.HasSideEffects);
2464
2465 // Emit as a constant.
2466 // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
2467 // somewhat heavy refactoring...)
2468 mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
2469 refExpr->getLocation(), result.Val, resultType);
2470 mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
2471 assert(cstToEmit && "expected a typed attribute");
2472
2474
2475 return ConstantEmission::forValue(cstToEmit);
2476}
2477
2478CIRGenFunction::ConstantEmission
2479CIRGenFunction::tryEmitAsConstant(const MemberExpr *me) {
2480 if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, me))
2481 return tryEmitAsConstant(dre);
2482 return ConstantEmission();
2483}
2484
2485mlir::Value CIRGenFunction::emitScalarConstant(
2486 const CIRGenFunction::ConstantEmission &constant, Expr *e) {
2487 assert(constant && "not a constant");
2488 if (constant.isReference()) {
2489 cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
2490 return {};
2491 }
2492 return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
2493}
2494
2495LValue CIRGenFunction::emitPredefinedLValue(const PredefinedExpr *e) {
2496 const StringLiteral *sl = e->getFunctionName();
2497 assert(sl != nullptr && "No StringLiteral name in PredefinedExpr");
2498 auto fn = cast<cir::FuncOp>(curFn);
2499 StringRef fnName = fn.getName();
2500 fnName.consume_front("\01");
2501 std::array<StringRef, 2> nameItems = {
2502 PredefinedExpr::getIdentKindName(e->getIdentKind()), fnName};
2503 std::string gvName = llvm::join(nameItems, ".");
2504 if (isa_and_nonnull<BlockDecl>(curCodeDecl))
2505 cgm.errorNYI(e->getSourceRange(), "predefined lvalue in block");
2506
2507 return emitStringLiteralLValue(sl, gvName);
2508}
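// For illustration, in
//   void foo(void) { const char *s = __func__; }
// the predefined string "foo" is emitted as a global string literal whose name
// joins the identifier kind with the enclosing function's name, e.g. a name
// along the lines of "__func__.foo".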
2509
2514
2515namespace {
2516// Handle the case where the condition folds to a simple integer constant,
2517// which means we don't have to separately handle the true/false blocks.
2518std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
2519 CIRGenFunction &cgf, const AbstractConditionalOperator *e) {
2520 const Expr *condExpr = e->getCond();
2521 llvm::APSInt condExprVal;
2522 if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
2523 return std::nullopt;
2524
2525 const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
2526 if (!condExprVal.getBoolValue())
2527 std::swap(live, dead);
2528
2529 if (cgf.containsLabel(dead))
2530 return std::nullopt;
2531
2532 // If the true case is live, we need to track its region.
2535 // If a throw expression we emit it and return an undefined lvalue
2536 // because it can't be used.
2537 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
2538 cgf.emitCXXThrowExpr(throwExpr);
2539 // Return an undefined lvalue - the throw terminates execution
2540 // so this value will never actually be used
2541 mlir::Type elemTy = cgf.convertType(dead->getType());
2542 mlir::Value undefPtr =
2543 cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
2544 cgf.getLoc(throwExpr->getSourceRange()));
2545 return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
2546 dead->getType());
2547 }
2548 return cgf.emitLValue(live);
2549}
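// For illustration, the simple case above applies to C++ code such as
//   int a, b;
//   void set() { (1 ? a : b) = 5; }
// where the condition folds to true, so only 'a' is emitted as an l-value and
// the dead arm 'b' is skipped (unless the dead arm contains a label).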
2550
2551/// Emit the operand of a glvalue conditional operator. This is either a glvalue
2552/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
2553/// LValue is returned and the current block has been terminated.
2554static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
2555 const Expr *operand) {
2556 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
2557 cgf.emitCXXThrowExpr(throwExpr);
2558 return std::nullopt;
2559 }
2560
2561 return cgf.emitLValue(operand);
2562}
2563} // namespace
2564
2565// Create and generate the 3 blocks for a conditional operator.
2566// Leaves the 'current block' in the continuation basic block.
2567template <typename FuncTy>
2568CIRGenFunction::ConditionalInfo
2569CIRGenFunction::emitConditionalBlocks(const AbstractConditionalOperator *e,
2570 const FuncTy &branchGenFunc) {
2571 ConditionalInfo info;
2572 ConditionalEvaluation eval(*this);
2573 mlir::Location loc = getLoc(e->getSourceRange());
2574 CIRGenBuilderTy &builder = getBuilder();
2575
2576 mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
2577 SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints;
2578 mlir::Type yieldTy{};
2579
2580 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
2581 const Expr *expr, std::optional<LValue> &resultLV) {
2582 CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
2583 curLexScope->setAsTernary();
2584
2586 eval.beginEvaluation();
2587 resultLV = branchGenFunc(*this, expr);
2588 mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
2589 eval.endEvaluation();
2590
2591 if (resultPtr) {
2592 yieldTy = resultPtr.getType();
2593 cir::YieldOp::create(b, loc, resultPtr);
2594 } else {
2595 // If the LHS or RHS is a void expression, we need to patch the arms
2596 // so as to properly match yield types.
2597 // If the current block's terminator is an UnreachableOp (from a throw),
2598 // we don't need a yield
2599 if (builder.getInsertionBlock()->mightHaveTerminator()) {
2600 mlir::Operation *terminator =
2601 builder.getInsertionBlock()->getTerminator();
2602 if (isa_and_nonnull<cir::UnreachableOp>(terminator))
2603 insertPoints.push_back(b.saveInsertionPoint());
2604 }
2605 }
2606 };
2607
2608 info.result = cir::TernaryOp::create(
2609 builder, loc, condV,
2610 /*trueBuilder=*/
2611 [&](mlir::OpBuilder &b, mlir::Location loc) {
2612 emitBranch(b, loc, e->getTrueExpr(), info.lhs);
2613 },
2614 /*falseBuilder=*/
2615 [&](mlir::OpBuilder &b, mlir::Location loc) {
2616 emitBranch(b, loc, e->getFalseExpr(), info.rhs);
2617 })
2618 .getResult();
2619
2620 // If both arms are void, so be it.
2621 if (!yieldTy)
2622 yieldTy = voidTy;
2623
2624 // Insert required yields.
2625 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2626 mlir::OpBuilder::InsertionGuard guard(builder);
2627 builder.restoreInsertionPoint(toInsert);
2628
2629 // Block does not return: build empty yield.
2630 if (!yieldTy) {
2631 cir::YieldOp::create(builder, loc);
2632 } else { // Block returns: set null yield value.
2633 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2634 cir::YieldOp::create(builder, loc, op0);
2635 }
2636 }
2637
2638 return info;
2639}
2640
2641LValue CIRGenFunction::emitConditionalOperatorLValue(
2642 const AbstractConditionalOperator *expr) {
2643 if (!expr->isGLValue()) {
2644 // ?: here should be an aggregate.
2645 assert(hasAggregateEvaluationKind(expr->getType()) &&
2646 "Unexpected conditional operator!");
2647 return emitAggExprToLValue(expr);
2648 }
2649
2650 OpaqueValueMapping binding(*this, expr);
2651 if (std::optional<LValue> res =
2652 handleConditionalOperatorLValueSimpleCase(*this, expr))
2653 return *res;
2654
2655 ConditionalInfo info =
2656 emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
2657 return emitLValueOrThrowExpression(cgf, e);
2658 });
2659
2660 if ((info.lhs && !info.lhs->isSimple()) ||
2661 (info.rhs && !info.rhs->isSimple())) {
2662 cgm.errorNYI(expr->getSourceRange(),
2663 "unsupported conditional operator with non-simple lvalue");
2664 return LValue();
2665 }
2666
2667 if (info.lhs && info.rhs) {
2668 Address lhsAddr = info.lhs->getAddress();
2669 Address rhsAddr = info.rhs->getAddress();
2670 Address result(info.result, lhsAddr.getElementType(),
2671 std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
2672 AlignmentSource alignSource =
2673 std::max(info.lhs->getBaseInfo().getAlignmentSource(),
2674 info.rhs->getBaseInfo().getAlignmentSource());
2676 return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
2677 }
2678
2679 assert((info.lhs || info.rhs) &&
2680 "both operands of glvalue conditional are throw-expressions?");
2681 return info.lhs ? *info.lhs : *info.rhs;
2682}
2683
2684/// An LValue is a candidate for having its loads and stores be made atomic if
2685/// we are operating under /volatile:ms *and* the LValue itself is volatile and
2686/// such an operation can be performed without a libcall.
2687bool CIRGenFunction::isLValueSuitableForInlineAtomic(LValue lv) {
2688 if (!cgm.getLangOpts().MSVolatile)
2689 return false;
2690
2691 cgm.errorNYI("LValueSuitableForInlineAtomic LangOpts MSVolatile");
2692 return false;
2693}
#define V(N, I)
Provides definitions for the various language-specific address spaces.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e, GlobalDecl gd)
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static cir::IntAttr getConstantIndexOrNull(mlir::Value idx)
static const Expr * getSimpleArrayDecayOperand(const Expr *e)
If the specified expr is a simple decay from an array to pointer, return the array subexpression.
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd)
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e, const VarDecl *vd)
static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf, mlir::Location beginLoc, mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd, mlir::Value thisValue)
static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd)
Defines the clang::Expr interface and subclasses for C++ expressions.
__device__ __2f16 b
__device__ __2f16 float c
static OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block)
cir::GetMemberOp createGetMember(mlir::Location loc, mlir::Type resultTy, mlir::Value base, llvm::StringRef name, unsigned index)
cir::PointerType getPointerTo(mlir::Type ty)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType BoolTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
CanQualType getCanonicalTagType(const TagDecl *TD) const
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4287
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4465
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4471
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4477
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2721
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2776
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2750
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:2764
SourceLocation getEndLoc() const
Definition Expr.h:2767
QualType getElementType() const
Definition TypeBase.h:3734
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:3972
Expr * getLHS() const
Definition Expr.h:4022
Expr * getRHS() const
Definition Expr.h:4024
Opcode getOpcode() const
Definition Expr.h:4017
mlir::Value getPointer() const
Definition Address.h:84
mlir::Type getElementType() const
Definition Address.h:111
static Address invalid()
Definition Address.h:69
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
cir::TargetAddressSpaceAttr getAddressSpace() const
Definition Address.h:119
clang::CharUnits getAlignment() const
Definition Address.h:124
mlir::Type getType() const
Definition Address.h:103
bool isValid() const
Definition Address.h:70
mlir::Operation * getDefiningOp() const
Get the operation which defines this address.
Definition Address.h:127
An aggregate value slot.
IsDestructed_t
This is set to true if the slot might be aliased and it's not undefined behavior to access it through...
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
Address createElementBitCast(mlir::Location loc, Address addr, mlir::Type destType)
Cast the element type of the given address to a different type, preserving information like the align...
mlir::Value getArrayElement(mlir::Location arrayLocBegin, mlir::Location arrayLocEnd, mlir::Value arrayPtr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
Create a cir.ptr_stride operation to get access to an array element.
Abstract information about a function or function prototype.
Definition CIRGenCall.h:27
bool isPseudoDestructor() const
Definition CIRGenCall.h:121
void setFunctionPointer(mlir::Operation *functionPtr)
Definition CIRGenCall.h:183
const clang::FunctionDecl * getBuiltinDecl() const
Definition CIRGenCall.h:97
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CIRGenCall.h:125
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:90
unsigned getBuiltinID() const
Definition CIRGenCall.h:101
static CIRGenCallee forBuiltin(unsigned builtinID, const clang::FunctionDecl *builtinDecl)
Definition CIRGenCall.h:106
mlir::Operation * getFunctionPointer() const
Definition CIRGenCall.h:145
static CIRGenCallee forPseudoDestructor(const clang::CXXPseudoDestructorExpr *expr)
Definition CIRGenCall.h:115
An object to manage conditionally-evaluated expressions.
static ConstantEmission forValue(mlir::TypedAttr c)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not point to a complete object, construct an l-value with the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitMemberExpr(const MemberExpr *e)
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
LValue emitLValueForLambdaField(const FieldDecl *field)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
const TargetCIRGenInfo & getTargetHooks() const
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Type convertTypeForMem(QualType t)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind op, bool isPre)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
LValue emitCallExprLValue(const clang::CallExpr *e)
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
mlir::MLIRContext & getMLIRContext()
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
void emitCXXThrowExpr(const CXXThrowExpr *e)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates a alloca and inserts it into the entry block of the current region.
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignmen and cast it to the defa...
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
CIRGenBuilderTy & getBuilder()
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
mlir::Value getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty={}, ForDefinition_t isForDefinition=NotForDefinition)
Return the mlir::Value for the address of the given global variable.
cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant)
This class handles record and union layout info while lowering AST types to CIR types.
cir::RecordType getCIRType() const
Return the "complete object" LLVM type associated with this record.
const CIRGenBitFieldInfo & getBitFieldInfo(const clang::FieldDecl *fd) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getCIRFieldNo(const clang::FieldDecl *fd) const
Return cir::RecordType element number that corresponds to the field FD.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
mlir::Attribute emitAbstract(const Expr *e, QualType destType)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
AlignmentSource getAlignmentSource() const
void mergeForCast(const LValueBaseInfo &info)
bool isExtVectorElt() const
const clang::Qualifiers & getQuals() const
mlir::Value getExtVectorPointer() const
static LValue makeExtVectorElt(Address vecAddress, mlir::ArrayAttr elts, clang::QualType type, LValueBaseInfo baseInfo)
mlir::Value getVectorIdx() const
bool isVectorElt() const
Address getAddress() const
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
mlir::ArrayAttr getExtVectorElts() const
static LValue makeVectorElt(Address vecAddress, mlir::Value index, clang::QualType t, LValueBaseInfo baseInfo)
unsigned getVRQualifiers() const
clang::QualType getType() const
static LValue makeBitfield(Address addr, const CIRGenBitFieldInfo &info, clang::QualType type, LValueBaseInfo baseInfo)
Create a new object to represent a bit-field access.
mlir::Value getPointer() const
bool isVolatileQualified() const
bool isBitField() const
Address getVectorAddress() const
clang::CharUnits getAlignment() const
LValueBaseInfo getBaseInfo() const
bool isVolatile() const
const CIRGenBitFieldInfo & getBitFieldInfo() const
Address getBitFieldAddress() const
Address getExtVectorAddress() const
bool isSimple() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:254
virtual mlir::Value performAddrSpaceCast(CIRGenFunction &cgf, mlir::Value v, cir::TargetAddressSpaceAttr srcAddr, mlir::Type destTy, bool isNonNull=false) const
Perform address space cast of an expression of pointer type.
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:179
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2129
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1366
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3060
Expr * getCallee()
Definition Expr.h:3024
arg_range arguments()
Definition Expr.h:3129
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3610
CastKind getCastKind() const
Definition Expr.h:3654
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3697
bool changesVolatileQualification() const
Return whether this cast changes the volatile qualification of its operand.
Definition Expr.h:3744
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1946
Expr * getSubExpr()
Definition Expr.h:3660
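A minimal sketch of walking a cast node with the accessors above; `e` is an assumed clang::Expr* from an AST being visited.

// Illustrative sketch: report the cast kind and look through to the operand.
if (auto *castE = llvm::dyn_cast<clang::CastExpr>(e)) {
  llvm::outs() << clang::CastExpr::getCastKindName(castE->getCastKind()) << "\n";
  clang::Expr *operand = castE->getSubExpr()->IgnoreParens();
  (void)operand;
}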
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
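A small sketch of the CharUnits arithmetic listed above, of the kind used when deriving the alignment of a subobject from the alignment of its container; the concrete quantities are made up for illustration.

// Illustrative sketch: alignment at a byte offset and of an array element.
clang::CharUnits base = clang::CharUnits::fromQuantity(16);
clang::CharUnits atOffset =
    base.alignmentAtOffset(clang::CharUnits::fromQuantity(4));      // -> 4
clang::CharUnits elemAlign =
    base.alignmentOfArrayElement(clang::CharUnits::fromQuantity(8)); // -> 8
(void)atOffset; (void)elemAlign;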
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3275
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3539
bool isFileScope() const
Definition Expr.h:3571
const Expr * getInitializer() const
Definition Expr.h:3567
ConditionalOperator - The ?: ternary operator.
Definition Expr.h:4325
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1474
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:484
ValueDecl * getDecl()
Definition Expr.h:1338
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1468
SourceLocation getLocation() const
Definition Expr.h:1346
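An illustrative sketch of the DeclRefExpr queries above; `dre` is an assumed clang::DeclRefExpr*.

// Illustrative sketch: classify a variable reference before emission.
clang::ValueDecl *vd = dre->getDecl();
if (dre->refersToEnclosingVariableOrCapture()) {
  // Reference goes through a lambda/block capture, so the variable's own
  // storage is not addressed directly.
}
if (dre->isNonOdrUse() == clang::NOUR_Constant) {
  // The reference can be folded to a constant instead of loading the decl.
}
(void)vd;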
SourceLocation getLocation() const
Definition DeclBase.h:439
DeclContext * getDeclContext()
Definition DeclBase.h:448
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:80
bool isGLValue() const
Definition Expr.h:287
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:444
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1542
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6498
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4410
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4442
const Expr * getBase() const
Definition Expr.h:6515
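A sketch of recovering the element indices of a vector swizzle such as v.xyzw using the accessors above; `evee` is an assumed clang::ExtVectorElementExpr*.

// Illustrative sketch: decode which vector elements a swizzle touches.
llvm::SmallVector<uint32_t, 4> indices;
evee->getEncodedElementAccess(indices);
const clang::Expr *vecBase = evee->getBase(); // pointer if isArrow(), value otherwise
(void)vecBase;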
Represents a member of a struct/union/class.
Definition Decl.h:3160
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3263
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4820
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
bool isZeroSize(const ASTContext &Ctx) const
Determine if this field is a subobject of zero size, that is, either a zero-length bit-field or a fie...
Definition Decl.cpp:4760
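A minimal sketch of the FieldDecl queries above; `fd` is an assumed const clang::FieldDecl*.

// Illustrative sketch: locate a field within its record and pick an access path.
const clang::RecordDecl *owner = fd->getParent(); // enclosing struct/union/class
unsigned idx = fd->getFieldIndex();               // position within that record
bool needsBitFieldPath = fd->isBitField();        // bit-fields take a separate l-value path
(void)owner; (void)idx; (void)needsBitFieldPath;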
Represents a function declaration or definition.
Definition Decl.h:2000
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g. 'int foo(int)' or 'int foo(void)'.
Definition TypeBase.h:5254
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4920
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4945
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4970
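A sketch of how the storage duration of a materialized temporary steers emission; `mte` is an assumed const clang::MaterializeTemporaryExpr*. The mapping to storage kinds follows the usual codegen conventions and is stated here only as an illustration.

// Illustrative sketch: decide where a materialized temporary lives.
switch (mte->getStorageDuration()) {
case clang::SD_FullExpression:
case clang::SD_Automatic:
  break; // stack temporary in the current function
case clang::SD_Static:
case clang::SD_Thread:
  break; // emitted as a (possibly thread-local) global temporary
case clang::SD_Dynamic:
  break;
}
const clang::Expr *init = mte->getSubExpr(); // expression whose value is materialized
(void)init;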
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3298
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3381
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3522
Expr * getBase() const
Definition Expr.h:3375
bool isArrow() const
Definition Expr.h:3482
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3493
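A minimal sketch of the MemberExpr accessors above; `me` is an assumed const clang::MemberExpr*.

// Illustrative sketch: pull apart a member access.
const clang::Expr *base = me->getBase();        // object or pointer operand
clang::ValueDecl *member = me->getMemberDecl(); // FieldDecl, CXXMethodDecl, ...
if (me->isArrow()) {
  // `base` has pointer type; the member l-value is formed through a load of it.
}
(void)base; (void)member;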
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3653
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
Represents a C++ nested name specifier, such as "::std::vector<int>::".
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1228
bool isUnique() const
Definition Expr.h:1236
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition Expr.h:2005
StringRef getIdentKindName() const
Definition Expr.h:2062
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2040
StringLiteral * getFunctionName()
Definition Expr.h:2049
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8404
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8318
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1179
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
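A sketch tying the QualType and Qualifiers entries together; `ty` is an assumed clang::QualType, and the checks shown are the kind of qualifier tests made before emitting loads and stores.

// Illustrative sketch: inspect and adjust qualifiers on a type.
clang::Qualifiers quals = ty.getQualifiers();
if (ty.getObjCLifetime() == clang::Qualifiers::OCL_None) {
  // No ARC lifetime to honour on loads/stores of this type.
}
clang::QualType constTy = ty.withCVRQualifiers(clang::Qualifiers::Const);
bool needsCleanup = ty.isDestructedType() != clang::QualType::DK_none;
(void)quals; (void)constTy; (void)needsCleanup;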
Represents a struct/union/class.
Definition Decl.h:4312
Encodes a location in the source.
Stmt - This represents one statement.
Definition Stmt.h:85
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
StringLiteral - This represents a string literal expression, e.g. "foo" or L"bar".
Definition Expr.h:1799
bool isUnion() const
Definition Decl.h:3922
Exposes information about the current target.
Definition TargetInfo.h:226
virtual StringRef getABI() const
Get the ABI currently in use.
bool isVoidType() const
Definition TypeBase.h:8871
bool isBooleanType() const
Definition TypeBase.h:9001
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9167
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8614
bool isFunctionPointerType() const
Definition TypeBase.h:8582
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2337
bool isConstantMatrixType() const
Definition TypeBase.h:8676
bool isPointerType() const
Definition TypeBase.h:8515
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
bool isReferenceType() const
Definition TypeBase.h:8539
bool isVariableArrayType() const
Definition TypeBase.h:8626
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isExtVectorBoolType() const
Definition TypeBase.h:8662
bool isAnyComplexType() const
Definition TypeBase.h:8650
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9044
bool isAtomicType() const
Definition TypeBase.h:8697
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2800
bool isFunctionType() const
Definition TypeBase.h:8511
bool isVectorType() const
Definition TypeBase.h:8654
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation -- i.e., it is a boolean type,...
Definition Type.cpp:2354
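A sketch of the Type predicates listed above, of the kind used when classifying an expression's type before choosing an emission path; `ty` is an assumed clang::QualType.

// Illustrative sketch: dispatch on broad type categories.
if (ty->isPointerType() || ty->isReferenceType()) {
  clang::QualType pointee = ty->getPointeeType();
  (void)pointee;
} else if (const auto *vecTy = ty->getAs<clang::VectorType>()) {
  (void)vecTy; // vector element accesses take the vector-element l-value path
} else if (ty->isAnyComplexType()) {
  // _Complex values are handled as (real, imag) pairs
}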
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2326
static bool isPrefix(Opcode Op)
isPrefix - Return true if this is a prefix operation, like --x.
Definition Expr.h:2319
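A minimal sketch distinguishing the increment/decrement forms with the static helpers above; `uo` is an assumed const clang::UnaryOperator*.

// Illustrative sketch: classify ++x, x++, --x, x--.
clang::UnaryOperatorKind op = uo->getOpcode();
bool isInc = clang::UnaryOperator::isIncrementOp(op); // ++ rather than --
bool isPre = clang::UnaryOperator::isPrefix(op);      // prefix form yields the updated value
const clang::Expr *operand = uo->getSubExpr();
(void)isInc; (void)isPre; (void)operand;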
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
TLSKind getTLSKind() const
Definition Decl.cpp:2173
bool hasInit() const
Definition Decl.cpp:2403
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2371
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1184
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:952
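A sketch of the VarDecl queries above; `vd` is an assumed const clang::VarDecl*, and the comments describe the usual storage choices only as an illustration.

// Illustrative sketch: classify a variable's storage before emission.
if (vd->hasLocalStorage()) {
  // Non-static local: typically emitted as a stack allocation in the function.
} else if (vd->getTLSKind() == clang::VarDecl::TLS_Dynamic) {
  // Thread-local with a dynamic initializer: needs per-thread initialization.
}
bool hasInitializer = vd->hasInit();
(void)hasInitializer;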
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3966
Represents a GCC generic vector type.
Definition TypeBase.h:4175
Defines the clang::TargetInfo interface.
cir::TargetAddressSpaceAttr toCIRTargetAddressSpace(mlir::MLIRContext &context, clang::LangAS langAS)
Definition CIRTypes.cpp:816
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static AlignmentSource getFieldAlignmentSource(AlignmentSource source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
bool isTargetAddressSpace(LangAS AS)
@ SC_Register
Definition Specifiers.h:257
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:342
@ SD_Static
Static storage duration.
Definition Specifiers.h:343
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:340
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:341
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:344
LangAS
Defines the address space values used by the address space qualifier of QualType.
U cast(CodeGen::Address addr)
Definition Address.h:327
LangAS getLangASFromTargetAS(unsigned TargetAS)
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:180
static bool weakRefReference()
static bool objCLifetime()
static bool emitLifetimeMarkers()
static bool opLoadEmitScalarRangeCheck()
static bool addressSpace()
static bool opLoadStoreThreadLocal()
static bool opAllocaNonGC()
static bool opGlobalThreadLocal()
static bool opAllocaOpenMPThreadPrivate()
static bool preservedAccessIndexRegion()
static bool mergeAllConstants()
static bool opLoadStoreTbaa()
static bool cgFPOptionsRAII()
static bool opCallChain()
static bool opAllocaImpreciseLifetime()
static bool opAllocaStaticLocal()
static bool opAllocaTLS()
static bool emitCheckedInBoundsGEP()
static bool attributeNoBuiltin()
static bool setObjCGCLValueClass()
static bool cirgenABIInfo()
static bool opLoadStoreObjC()
static bool opCallArgEvaluationOrder()
static bool lambdaCaptures()
static bool insertBuiltinUnpredictable()
static bool opCallMustTail()
static bool shouldReverseUnaryCondOnBoolExpr()
static bool tryEmitAsConstant()
static bool addressIsKnownNonNull()
static bool astVarDeclInterface()
static bool cgCapturedStmtInfo()
static bool opAllocaEscapeByReference()
static bool opLoadStoreNontemporal()
static bool opCallFnInfoOpts()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Record with information about how a bitfield should be accessed.
unsigned volatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned volatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
cir::TargetAddressSpaceAttr getCIRAllocaAddressSpace() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:612
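A minimal sketch of constant evaluation with EvalResult, as described above; `e` is an assumed const clang::Expr* and `ctx` an ASTContext available to the caller.

// Illustrative sketch: try to fold an expression before emitting code for it.
clang::Expr::EvalResult result;
if (e->EvaluateAsRValue(result, ctx) && !result.HasSideEffects) {
  // result.Val holds the folded APValue and can be emitted as a constant.
}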