clang 23.0.0git
CIRGenExpr.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "Address.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h"
19#include "mlir/IR/BuiltinAttributes.h"
20#include "mlir/IR/Value.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/CharUnits.h"
23#include "clang/AST/Decl.h"
24#include "clang/AST/Expr.h"
25#include "clang/AST/ExprCXX.h"
32#include <optional>
33
34using namespace clang;
35using namespace clang::CIRGen;
36using namespace cir;
37
38/// Get the address of a (non-bit-field) field within a record. The resulting
39/// address doesn't necessarily have the right type; zero-sized fields are not
/// yet supported and produce an NYI diagnostic plus an invalid Address.
41 const FieldDecl *field,
42 llvm::StringRef fieldName,
43 unsigned fieldIndex) {
44 if (field->isZeroSize(getContext())) {
45 cgm.errorNYI(field->getSourceRange(),
46 "emitAddrOfFieldStorage: zero-sized field");
47 return Address::invalid();
48 }
49
50 mlir::Location loc = getLoc(field->getLocation());
51
52 mlir::Type fieldType = convertType(field->getType());
53 auto fieldPtr = cir::PointerType::get(fieldType);
54 // For most cases fieldName is the same as field->getName() but for lambdas,
55 // which do not currently carry the name, so it can be passed down from the
56 // CaptureStmt.
57 cir::GetMemberOp memberAddr = builder.createGetMember(
58 loc, fieldPtr, base.getPointer(), fieldName, fieldIndex);
59
60 // Retrieve layout information, compute alignment and return the final
61 // address.
62 const RecordDecl *rec = field->getParent();
63 const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
64 unsigned idx = layout.getCIRFieldNo(field);
// The field's alignment is the base alignment adjusted by the field's byte
// offset within the CIR record layout.
66 layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
67 return Address(memberAddr, base.getAlignment().alignmentAtOffset(offset));
68}
69
70/// Given an expression of pointer type, try to
71/// derive a more accurate bound on the alignment of the pointer.
/// Peels through parens, casts, unary '&' and std::addressof-style builtin
/// calls; when \p baseInfo is non-null it receives the alignment source of
/// the resulting address.
73 LValueBaseInfo *baseInfo) {
74 // We allow this with ObjC object pointers because of fragile ABIs.
75 assert(expr->getType()->isPointerType() ||
76 expr->getType()->isObjCObjectPointerType());
77 expr = expr->IgnoreParens();
78
79 // Casts:
80 if (auto const *ce = dyn_cast<CastExpr>(expr)) {
81 if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
82 cgm.emitExplicitCastExprType(ece);
83
84 switch (ce->getCastKind()) {
85 // Non-converting casts (but not C's implicit conversion from void*).
86 case CK_BitCast:
87 case CK_NoOp:
88 case CK_AddressSpaceConversion: {
89 if (const auto *ptrTy =
90 ce->getSubExpr()->getType()->getAs<PointerType>()) {
// Recurse into the cast operand; a cast from void* carries no useful
// alignment information, so fall through to the generic path instead.
91 if (ptrTy->getPointeeType()->isVoidType())
92 break;
93
94 LValueBaseInfo innerBaseInfo;
96 Address addr =
97 emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
98 if (baseInfo)
99 *baseInfo = innerBaseInfo;
100
101 if (isa<ExplicitCastExpr>(ce)) {
102 LValueBaseInfo targetTypeBaseInfo;
103
104 const QualType pointeeType = expr->getType()->getPointeeType();
105 const CharUnits align =
106 cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);
107
108 // If the source l-value is opaque, honor the alignment of the
109 // casted-to type.
110 if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
111 if (baseInfo)
112 baseInfo->mergeForCast(targetTypeBaseInfo);
113 addr = Address(addr.getPointer(), addr.getElementType(), align);
114 }
115 }
116
118
// Rewrite the element type of the address to the cast's target pointee
// type so downstream users see a correctly-typed pointer.
119 const mlir::Type eltTy =
120 convertTypeForMem(expr->getType()->getPointeeType());
121 addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
122 addr, eltTy);
124
125 return addr;
126 }
127 break;
128 }
129
130 // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
131 case CK_ArrayToPointerDecay:
132 return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);
133
134 case CK_UncheckedDerivedToBase:
135 case CK_DerivedToBase: {
138 Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
139 const CXXRecordDecl *derived =
140 ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
141 return getAddressOfBaseClass(addr, derived, ce->path(),
143 ce->getExprLoc());
144 }
145
146 case CK_AnyPointerToBlockPointerCast:
147 case CK_BaseToDerived:
148 case CK_BaseToDerivedMemberPointer:
149 case CK_BlockPointerToObjCPointerCast:
150 case CK_BuiltinFnToFnPtr:
151 case CK_CPointerToObjCPointerCast:
152 case CK_DerivedToBaseMemberPointer:
153 case CK_Dynamic:
154 case CK_FunctionToPointerDecay:
155 case CK_IntegralToPointer:
156 case CK_LValueToRValue:
157 case CK_LValueToRValueBitCast:
158 case CK_NullToMemberPointer:
159 case CK_NullToPointer:
160 case CK_ReinterpretMemberPointer:
161 // Common pointer conversions, nothing to do here.
162 // TODO: Is there any reason to treat base-to-derived conversions
163 // specially?
164 break;
165
// None of the cast kinds below can yield a pointer-typed expression, so
// reaching any of them here indicates a bug in the caller/AST.
166 case CK_ARCConsumeObject:
167 case CK_ARCExtendBlockObject:
168 case CK_ARCProduceObject:
169 case CK_ARCReclaimReturnedObject:
170 case CK_AtomicToNonAtomic:
171 case CK_BooleanToSignedIntegral:
172 case CK_ConstructorConversion:
173 case CK_CopyAndAutoreleaseBlockObject:
174 case CK_Dependent:
175 case CK_FixedPointCast:
176 case CK_FixedPointToBoolean:
177 case CK_FixedPointToFloating:
178 case CK_FixedPointToIntegral:
179 case CK_FloatingCast:
180 case CK_FloatingComplexCast:
181 case CK_FloatingComplexToBoolean:
182 case CK_FloatingComplexToIntegralComplex:
183 case CK_FloatingComplexToReal:
184 case CK_FloatingRealToComplex:
185 case CK_FloatingToBoolean:
186 case CK_FloatingToFixedPoint:
187 case CK_FloatingToIntegral:
188 case CK_HLSLAggregateSplatCast:
189 case CK_HLSLArrayRValue:
190 case CK_HLSLElementwiseCast:
191 case CK_HLSLVectorTruncation:
192 case CK_HLSLMatrixTruncation:
193 case CK_IntToOCLSampler:
194 case CK_IntegralCast:
195 case CK_IntegralComplexCast:
196 case CK_IntegralComplexToBoolean:
197 case CK_IntegralComplexToFloatingComplex:
198 case CK_IntegralComplexToReal:
199 case CK_IntegralRealToComplex:
200 case CK_IntegralToBoolean:
201 case CK_IntegralToFixedPoint:
202 case CK_IntegralToFloating:
203 case CK_LValueBitCast:
204 case CK_MatrixCast:
205 case CK_MemberPointerToBoolean:
206 case CK_NonAtomicToAtomic:
207 case CK_ObjCObjectLValueCast:
208 case CK_PointerToBoolean:
209 case CK_PointerToIntegral:
210 case CK_ToUnion:
211 case CK_ToVoid:
212 case CK_UserDefinedConversion:
213 case CK_VectorSplat:
214 case CK_ZeroToOCLOpaqueType:
215 llvm_unreachable("unexpected cast for emitPointerWithAlignment");
216 }
217 }
218
219 // Unary &
220 if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
221 // TODO(cir): maybe we should use a CIR unary op for pointers here instead.
222 if (uo->getOpcode() == UO_AddrOf) {
// &x: the address of the lvalue is exactly the pointer value; its base
// info (Decl alignment source) is more precise than the type's.
223 LValue lv = emitLValue(uo->getSubExpr());
224 if (baseInfo)
225 *baseInfo = lv.getBaseInfo();
227 return lv.getAddress();
228 }
229 }
230
231 // std::addressof and variants.
232 if (auto const *call = dyn_cast<CallExpr>(expr)) {
233 switch (call->getBuiltinCallee()) {
234 default:
235 break;
236 case Builtin::BIaddressof:
237 case Builtin::BI__addressof:
238 case Builtin::BI__builtin_addressof: {
239 LValue lv = emitLValue(call->getArg(0));
240 if (baseInfo)
241 *baseInfo = lv.getBaseInfo();
243 return lv.getAddress();
244 }
245 }
246 }
247
248 // Otherwise, use the alignment of the type.
// Fallback: evaluate the pointer as a scalar and derive alignment from the
// pointee type.
250 emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
251 /*forPointeeType=*/true, baseInfo);
252}
253
/// Store the rvalue \p src into the lvalue \p dst. Vector-element and
/// bit-field destinations are handled specially; simple destinations go
/// through emitStoreOfScalar. \p isInit marks an initializing store.
255 bool isInit) {
256 if (!dst.isSimple()) {
257 if (dst.isVectorElt()) {
258 // Read/modify/write the vector, inserting the new element
259 const mlir::Location loc = dst.getVectorPointer().getLoc();
260 const mlir::Value vector =
261 builder.createLoad(loc, dst.getVectorAddress());
262 const mlir::Value newVector = cir::VecInsertOp::create(
263 builder, loc, vector, src.getValue(), dst.getVectorIdx());
264 builder.createStore(loc, newVector, dst.getVectorAddress());
265 return;
266 }
267
// The only remaining non-simple kind handled here is a bit-field store.
268 assert(dst.isBitField() && "Unknown LValue type");
270 return;
271
272 cgm.errorNYI(dst.getPointer().getLoc(),
273 "emitStoreThroughLValue: non-simple lvalue");
274 return;
275 }
276
279 assert(src.isScalar() && "Can't emit an aggregate store with this method");
280 emitStoreOfScalar(src.getValue(), dst, isInit);
281}
282
/// Build an LValue that refers to the global variable \p vd as used by the
/// expression \p e. thread_local (dynamic TLS) and OpenMP declare-target
/// variables are not yet supported.
283static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
284 const VarDecl *vd) {
285 QualType t = e->getType();
286
287 // If it's thread_local, emit a call to its wrapper function instead.
288 if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
289 cgf.cgm.errorNYI(e->getSourceRange(),
290 "emitGlobalVarDeclLValue: thread_local variable");
291
292 // Check if the variable is marked as declare target with link clause in
293 // device codegen.
294 if (cgf.getLangOpts().OpenMP)
295 cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");
296
297 // Traditional LLVM codegen handles thread local separately, CIR handles
298 // as part of getAddrOfGlobalVar.
299 mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);
300
// The global symbol's type may differ from the declared memory type (e.g.
// opaque/incomplete cases); bitcast the pointer when they disagree.
301 mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
302 cir::PointerType realPtrTy = cir::PointerType::get(
303 realVarTy, mlir::cast<cir::PointerType>(v.getType()).getAddrSpace());
304 if (realPtrTy != v.getType())
305 v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);
306
307 CharUnits alignment = cgf.getContext().getDeclAlign(vd);
308 Address addr(v, realVarTy, alignment);
309 LValue lv;
// A global of reference type must be loaded through to the referencee.
310 if (vd->getType()->isReferenceType())
311 lv = cgf.emitLoadOfReferenceLValue(addr, cgf.getLoc(e->getSourceRange()),
313 else
314 lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
316 return lv;
317}
318
/// Store the scalar \p value to \p addr of clang type \p ty. Routes atomic
/// stores to emitAtomicStore and tags the destination alloca as initialized
/// when storing a declared variable's initializer. Boolean ext-vectors,
/// vec3 widening and nontemporal stores are not yet implemented.
319void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
320 bool isVolatile, QualType ty,
321 LValueBaseInfo baseInfo, bool isInit,
322 bool isNontemporal) {
323
324 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
325 // Boolean vectors use `iN` as storage type.
326 if (clangVecTy->isExtVectorBoolType())
327 cgm.errorNYI(addr.getPointer().getLoc(),
328 "emitStoreOfScalar ExtVectorBoolType");
329
330 // Handle vectors of size 3 like size 4 for better performance.
331 const mlir::Type elementType = addr.getElementType();
332 const auto vecTy = cast<cir::VectorType>(elementType);
333
334 // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it upstreamed
336 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
337 cgm.errorNYI(addr.getPointer().getLoc(),
338 "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
339 }
340
// Convert from the value's primary IR type to its memory/storage form.
341 value = emitToMemory(value, ty);
342
344 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
345 if (ty->isAtomicType() ||
346 (!isInit && isLValueSuitableForInlineAtomic(atomicLValue))) {
347 emitAtomicStore(RValue::get(value), atomicLValue, isInit);
348 return;
349 }
350
351 // Update the alloca with more info on initialization.
352 assert(addr.getPointer() && "expected pointer to exist");
353 auto srcAlloca = addr.getDefiningOp<cir::AllocaOp>();
354 if (currVarDecl && srcAlloca) {
355 const VarDecl *vd = currVarDecl;
356 assert(vd && "VarDecl expected");
357 if (vd->hasInit())
358 srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
359 }
360
361 assert(currSrcLoc && "must pass in source location");
362 builder.createStore(*currSrcLoc, value, addr, isVolatile);
363
364 if (isNontemporal) {
365 cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
366 return;
367 }
368
370}
371
372// TODO: Replace this with a proper TargetInfo function call.
373/// Helper method to check if the underlying ABI is AAPCS
374static bool isAAPCS(const TargetInfo &targetInfo) {
375 return targetInfo.getABI().starts_with("aapcs");
376}
377
/// Store the scalar rvalue \p src into the bit-field lvalue \p dst via a
/// set-bitfield operation, honoring AAPCS volatile bit-field rules when the
/// target ABI requires them.
379 LValue dst) {
380
381 const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
382 mlir::Type resLTy = convertTypeForMem(dst.getType());
383 Address ptr = dst.getBitFieldAddress();
384
// NOTE(review): "useVoaltile" is a typo for "useVolatile" (same spelling in
// emitLoadOfBitfieldLValue) — worth a rename in a follow-up cleanup.
385 bool useVoaltile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
386 dst.isVolatileQualified() &&
387 info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());
388
389 assert(currSrcLoc && "must pass in source location");
390
391 return builder.createSetBitfield(*currSrcLoc, resLTy, ptr,
392 ptr.getElementType(), src.getValue(), info,
393 dst.isVolatileQualified(), useVoaltile);
394}
395
/// Load the value of the bit-field lvalue \p lv as an rvalue, via a
/// get-bitfield operation; applies AAPCS volatile bit-field handling.
397 const CIRGenBitFieldInfo &info = lv.getBitFieldInfo();
398
399 // Get the output type.
400 mlir::Type resLTy = convertType(lv.getType());
401 Address ptr = lv.getBitFieldAddress();
402
// NOTE(review): "useVoaltile" is a typo for "useVolatile" (kept as-is here;
// matches the spelling in emitStoreThroughBitfieldLValue).
403 bool useVoaltile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
404 isAAPCS(cgm.getTarget());
405
406 mlir::Value field =
407 builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
408 info, lv.isVolatile(), useVoaltile);
410 return RValue::get(field);
411}
412
/// Compute the address of the storage unit backing bit-field \p field, as a
/// get-member off \p base. For unions the member index is the field's own
/// index; otherwise the CIR layout index \p index is used.
414 const FieldDecl *field,
415 mlir::Type fieldType,
416 unsigned index) {
417 mlir::Location loc = getLoc(field->getLocation());
418 cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
420 cir::GetMemberOp sea = getBuilder().createGetMember(
421 loc, fieldPtr, base.getPointer(), field->getName(),
422 rec.isUnion() ? field->getFieldIndex() : index);
// Alignment is the base alignment adjusted by the storage unit's offset.
424 rec.getElementOffset(cgm.getDataLayout().layout, index));
425 return Address(sea, base.getAlignment().alignmentAtOffset(offset));
426}
427
/// Build a bit-field LValue for \p field relative to \p base, using the
/// bit-field info recorded in the parent record's CIR layout.
429 const FieldDecl *field) {
430 LValueBaseInfo baseInfo = base.getBaseInfo();
431 const CIRGenRecordLayout &layout =
432 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
433 const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
434
436
437 unsigned idx = layout.getCIRFieldNo(field);
438 Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);
439
// Make sure the address points at the bit-field's storage type.
440 mlir::Location loc = getLoc(field->getLocation());
441 if (addr.getElementType() != info.storageType)
442 addr = builder.createElementBitCast(loc, addr, info.storageType);
443
444 QualType fieldType =
446 // TODO(cir): Support TBAA for bit fields.
448 LValueBaseInfo fieldBaseInfo(baseInfo.getAlignmentSource());
449 return LValue::makeBitfield(addr, info, fieldType, fieldBaseInfo);
450}
451
// Build an LValue for member \p field of the record lvalue \p base,
// dispatching to the bit-field path when needed, loading through reference
// members, and propagating the record's volatile/restrict qualifiers.
453 LValueBaseInfo baseInfo = base.getBaseInfo();
454
455 if (field->isBitField())
456 return emitLValueForBitField(base, field);
457
458 QualType fieldType = field->getType();
459 const RecordDecl *rec = field->getParent();
460 AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
461 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));
463
464 Address addr = base.getAddress();
465 if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
466 if (cgm.getCodeGenOpts().StrictVTablePointers &&
467 classDecl->isDynamicClass()) {
468 cgm.errorNYI(field->getSourceRange(),
469 "emitLValueForField: strict vtable for dynamic class");
470 }
471 }
472
473 unsigned recordCVR = base.getVRQualifiers();
474
// Lambda capture fields don't carry a usable name; use the name recorded
// by the lambda capture machinery instead.
475 llvm::StringRef fieldName = field->getName();
476 unsigned fieldIndex;
477 if (cgm.lambdaFieldToName.count(field))
478 fieldName = cgm.lambdaFieldToName[field];
479
480 if (rec->isUnion())
481 fieldIndex = field->getFieldIndex();
482 else {
483 const CIRGenRecordLayout &layout =
484 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
485 fieldIndex = layout.getCIRFieldNo(field);
486 }
487
488 addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);
490
491 // If this is a reference field, load the reference right now.
492 if (fieldType->isReferenceType()) {
494 LValue refLVal = makeAddrLValue(addr, fieldType, fieldBaseInfo);
495 if (recordCVR & Qualifiers::Volatile)
496 refLVal.getQuals().addVolatile();
497 addr = emitLoadOfReference(refLVal, getLoc(field->getSourceRange()),
498 &fieldBaseInfo);
499
500 // Qualifiers on the struct don't apply to the referencee.
501 recordCVR = 0;
502 fieldType = fieldType->getPointeeType();
503 }
504
505 if (field->hasAttr<AnnotateAttr>()) {
506 cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
507 return LValue();
508 }
509
510 LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
511 lv.getQuals().addCVRQualifiers(recordCVR);
512
513 // __weak attribute on a field is ignored.
515 cgm.errorNYI(field->getSourceRange(),
516 "emitLValueForField: __weak attribute");
517 return LValue();
518 }
519
520 return lv;
521}
522
/// Like emitLValueForField, but used when initializing the field itself: a
/// reference-typed member is NOT loaded through — the lvalue designates the
/// reference slot so its initializer can be stored into it.
524 LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName) {
525 QualType fieldType = field->getType();
526
527 if (!fieldType->isReferenceType())
528 return emitLValueForField(base, field);
529
530 const CIRGenRecordLayout &layout =
531 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
532 unsigned fieldIndex = layout.getCIRFieldNo(field);
533
534 Address v =
535 emitAddrOfFieldStorage(base.getAddress(), field, fieldName, fieldIndex);
536
537 // Make sure that the address is pointing to the right type.
538 mlir::Type memTy = convertTypeForMem(fieldType);
539 v = builder.createElementBitCast(getLoc(field->getSourceRange()), v, memTy);
540
541 // TODO: Generate TBAA information that describes this access as a structure
542 // member access and not just an access to an object of the field's type. This
543 // should be similar to what we do in EmitLValueForField().
544 LValueBaseInfo baseInfo = base.getBaseInfo();
545 AlignmentSource fieldAlignSource = baseInfo.getAlignmentSource();
546 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(fieldAlignSource));
548 return makeAddrLValue(v, fieldType, fieldBaseInfo);
549}
550
551/// Converts a scalar value from its primary IR type (as returned
552/// by ConvertType) to its load/store type.
553mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
554 if (auto *atomicTy = ty->getAs<AtomicType>())
555 ty = atomicTy->getValueType();
556
557 if (ty->isExtVectorBoolType()) {
558 cgm.errorNYI("emitToMemory: extVectorBoolType");
559 }
560
561 // Unlike in classic codegen CIR, bools are kept as `cir.bool` and BitInts are
562 // kept as `cir.int<N>` until further lowering
563
564 return value;
565}
566
/// Converts a scalar value from its load/store type back to its primary IR
/// type; currently the identity in CIR except for the NYI vector-bool case.
567mlir::Value CIRGenFunction::emitFromMemory(mlir::Value value, QualType ty) {
568 if (auto *atomicTy = ty->getAs<AtomicType>())
569 ty = atomicTy->getValueType();
570
// Packed boolean vectors need unpacking from their iN storage form —
// guarded by a type check just above; not yet implemented.
572 cgm.errorNYI("emitFromMemory: PackedVectorBoolType");
573 }
574
575 return value;
576}
577
578void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
579 bool isInit) {
580 if (lvalue.getType()->isConstantMatrixType()) {
581 assert(0 && "NYI: emitStoreOfScalar constant matrix type");
582 return;
583 }
584
585 emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
586 lvalue.getType(), lvalue.getBaseInfo(), isInit,
587 /*isNontemporal=*/false);
588}
589
/// Load a scalar of clang type \p ty from \p addr, returning the loaded CIR
/// value (nullptr on the NYI boolean ext-vector path). Atomic loads, void
/// loads and boolean-representation fixups are not yet implemented.
590mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
591 QualType ty, SourceLocation loc,
592 LValueBaseInfo baseInfo) {
593 // Traditional LLVM codegen handles thread local separately, CIR handles
594 // as part of getAddrOfGlobalVar (GetGlobalOp).
595 mlir::Type eltTy = addr.getElementType();
596
597 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
598 if (clangVecTy->isExtVectorBoolType()) {
599 cgm.errorNYI(loc, "emitLoadOfScalar: ExtVectorBoolType");
600 return nullptr;
601 }
602
603 const auto vecTy = cast<cir::VectorType>(eltTy);
604
605 // Handle vectors of size 3 like size 4 for better performance.
607 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
608 cgm.errorNYI(addr.getPointer().getLoc(),
609 "emitLoadOfScalar Vec3 & PreserveVec3Type disabled");
610 }
611
613 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
614 if (ty->isAtomicType() || isLValueSuitableForInlineAtomic(atomicLValue))
615 cgm.errorNYI("emitLoadOfScalar: load atomic");
616
617 if (mlir::isa<cir::VoidType>(eltTy))
618 cgm.errorNYI(loc, "emitLoadOfScalar: void type");
619
621
622 mlir::Value loadOp = builder.createLoad(getLoc(loc), addr, isVolatile);
623 if (!ty->isBooleanType() && ty->hasBooleanRepresentation())
624 cgm.errorNYI("emitLoadOfScalar: boolean type with boolean representation");
625
626 return loadOp;
627}
628
/// Convenience overload: load a scalar from the simple lvalue \p lvalue by
/// unpacking its address, volatility and base info.
630 SourceLocation loc) {
633 return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
634 lvalue.getType(), loc, lvalue.getBaseInfo());
635}
636
637/// Given an expression that represents a value lvalue, this
638/// method emits the address of the lvalue, then loads the result as an rvalue,
639/// returning the rvalue.
641 assert(!lv.getType()->isFunctionType());
642 assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");
643
644 if (lv.isBitField())
645 return emitLoadOfBitfieldLValue(lv, loc);
646
647 if (lv.isSimple())
648 return RValue::get(emitLoadOfScalar(lv, loc));
649
// Single vector element: load the whole vector, then extract the lane.
650 if (lv.isVectorElt()) {
651 const mlir::Value load =
652 builder.createLoad(getLoc(loc), lv.getVectorAddress());
653 return RValue::get(cir::VecExtractOp::create(builder, getLoc(loc), load,
654 lv.getVectorIdx()));
655 }
656
657 if (lv.isExtVectorElt())
659
660 cgm.errorNYI(loc, "emitLoadOfLValue");
661 return RValue::get(nullptr);
662}
663
664int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx,
665 const mlir::ArrayAttr elts) {
666 auto elt = mlir::cast<mlir::IntegerAttr>(elts[idx]);
667 return elt.getInt();
668}
669
670// If this is a reference to a subset of the elements of a vector, create an
671// appropriate shufflevector.
673 mlir::Location loc = lv.getExtVectorPointer().getLoc();
674 mlir::Value vec = builder.createLoad(loc, lv.getExtVectorAddress());
675
676 // HLSL allows treating scalars as one-element vectors. Converting the scalar
677 // IR value to a vector here allows the rest of codegen to behave as normal.
678 if (getLangOpts().HLSL && !mlir::isa<cir::VectorType>(vec.getType())) {
679 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: HLSL");
680 return {};
681 }
682
683 const mlir::ArrayAttr elts = lv.getExtVectorElts();
684
685 // If the result of the expression is a non-vector type, we must be extracting
686 // a single element. Just codegen as an extractelement.
687 const auto *exprVecTy = lv.getType()->getAs<clang::VectorType>();
688 if (!exprVecTy) {
689 int64_t indexValue = getAccessedFieldNo(0, elts);
690 cir::ConstantOp index =
691 builder.getConstInt(loc, builder.getSInt64Ty(), indexValue);
692 return RValue::get(cir::VecExtractOp::create(builder, loc, vec, index));
693 }
694
695 // Always use shuffle vector to try to retain the original program structure
// Build the shuffle mask from the accessed element numbers (e.g. v.yx).
697 for (auto i : llvm::seq<unsigned>(0, exprVecTy->getNumElements()))
698 mask.push_back(getAccessedFieldNo(i, elts));
699
700 cir::VecShuffleOp resultVec = builder.createVecShuffle(loc, vec, mask);
701 if (lv.getType()->isExtVectorBoolType()) {
702 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: ExtVectorBoolType");
703 return {};
704 }
705
706 return RValue::get(resultVec);
707}
708
// Build an lvalue for a pointer-to-data-member binary expression
// (BO_PtrMemD: obj.*p, BO_PtrMemI: objptr->*p): evaluate the base address,
// evaluate the member pointer as a scalar, then combine them.
709LValue
711 assert((e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) &&
712 "unexpected binary operator opcode");
713
714 Address baseAddr = Address::invalid();
715 if (e->getOpcode() == BO_PtrMemD)
716 baseAddr = emitLValue(e->getLHS()).getAddress();
717 else
718 baseAddr = emitPointerWithAlignment(e->getLHS());
719
720 const auto *memberPtrTy = e->getRHS()->getType()->castAs<MemberPointerType>();
721
722 mlir::Value memberPtr = emitScalarExpr(e->getRHS());
723
724 LValueBaseInfo baseInfo;
726 Address memberAddr = emitCXXMemberDataPointerAddress(e, baseAddr, memberPtr,
727 memberPtrTy, &baseInfo);
728
729 return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo);
730}
731
732/// Generates lvalue for partial ext_vector access.
/// Returns the address of the first accessed element: the vector address is
/// reinterpreted as a pointer to the element type and indexed by the first
/// entry of the accessor list.
734 mlir::Location loc) {
735 Address vectorAddress = lv.getExtVectorAddress();
736 QualType elementTy = lv.getType()->castAs<VectorType>()->getElementType();
737 mlir::Type vectorElementTy = cgm.getTypes().convertType(elementTy);
738 Address castToPointerElement =
739 vectorAddress.withElementType(builder, vectorElementTy);
740
741 mlir::ArrayAttr extVecElts = lv.getExtVectorElts();
742 unsigned idx = getAccessedFieldNo(0, extVecElts);
743 mlir::Value idxValue =
744 builder.getConstInt(loc, mlir::cast<cir::IntType>(ptrDiffTy), idx);
745
746 mlir::Value elementValue = builder.getArrayElement(
747 loc, loc, castToPointerElement.getPointer(), vectorElementTy, idxValue,
748 /*shouldDecay=*/false);
749
// Alignment of element idx is the base alignment offset by idx elements.
750 const CharUnits eltSize = getContext().getTypeSizeInChars(elementTy);
751 const CharUnits alignment =
752 castToPointerElement.getAlignment().alignmentAtOffset(idx * eltSize);
753 return Address(elementValue, vectorElementTy, alignment);
754}
755
/// Return the CIR function for global decl \p gd, as resolved by the module.
756static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
758 return cgm.getAddrOfFunction(gd);
759}
760
// Thin wrapper: emit the lvalue for a lambda-captured field relative to the
// enclosing lambda object's `this` value.
762 mlir::Value thisValue) {
763 return cgf.emitLValueForLambdaField(fd, thisValue);
764}
765
766/// Given that we are currently emitting a lambda, emit an l-value for
767/// one of its members.
768///
/// Explicit-object ("deducing this") member functions are not yet supported.
770 mlir::Value thisValue) {
771 bool hasExplicitObjectParameter = false;
772 const auto *methD = dyn_cast_if_present<CXXMethodDecl>(curCodeDecl);
773 LValue lambdaLV;
774 if (methD) {
775 hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction();
776 assert(methD->getParent()->isLambda());
777 assert(methD->getParent() == field->getParent());
778 }
779 if (hasExplicitObjectParameter) {
780 cgm.errorNYI(field->getSourceRange(), "ExplicitObjectMemberFunction");
781 } else {
// Build an lvalue for the lambda closure object from its `this` pointer,
// then project the capture field out of it.
782 QualType lambdaTagType =
784 lambdaLV = makeNaturalAlignAddrLValue(thisValue, lambdaTagType);
785 }
786 return emitLValueForField(lambdaLV, field);
787}
788
792
/// Build an lvalue referring to the function \p gd as used by expression
/// \p e: a get-global of the function symbol, bitcast when the emitted
/// function type differs from the declared type.
793static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
794 GlobalDecl gd) {
795 const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
796 cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
797 mlir::Location loc = cgf.getLoc(e->getSourceRange());
798 CharUnits align = cgf.getContext().getDeclAlign(fd);
799
801
802 mlir::Type fnTy = funcOp.getFunctionType();
803 mlir::Type ptrTy = cir::PointerType::get(fnTy);
804 mlir::Value addr = cir::GetGlobalOp::create(cgf.getBuilder(), loc, ptrTy,
805 funcOp.getSymName());
806
// The emitted FuncOp type can differ from the declared type (e.g. for
// incomplete prototypes); cast the address to the declaration's type.
807 if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
808 fnTy = cgf.convertType(fd->getType());
809 ptrTy = cir::PointerType::get(fnTy);
810
811 addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
812 cir::CastKind::bitcast, addr);
813 }
814
815 return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
817}
818
819/// Determine whether we can emit a reference to \p vd from the current
820/// context, despite not necessarily having seen an odr-use of the variable in
821/// this context.
822/// TODO(cir): This could be shared with classic codegen.
824 const DeclRefExpr *e,
825 const VarDecl *vd) {
826 // For a variable declared in an enclosing scope, do not emit a spurious
827 // reference even if we have a capture, as that will emit an unwarranted
828 // reference to our capture state, and will likely generate worse code than
829 // emitting a local copy.
831 return false;
832
833 // For a local declaration declared in this function, we can always reference
834 // it even if we don't have an odr-use.
835 if (vd->hasLocalStorage()) {
836 return vd->getDeclContext() ==
837 dyn_cast_or_null<DeclContext>(cgf.curCodeDecl);
838 }
839
840 // For a global declaration, we can emit a reference to it if we know
841 // for sure that we are able to emit a definition of it.
842 vd = vd->getDefinition(cgf.getContext());
843 if (!vd)
844 return false;
845
846 // Don't emit a spurious reference if it might be to a variable that only
847 // exists on a different device / target.
848 // FIXME: This is unnecessarily broad. Check whether this would actually be a
849 // cross-target reference.
850 if (cgf.getLangOpts().OpenMP || cgf.getLangOpts().CUDA ||
851 cgf.getLangOpts().OpenCL) {
852 return false;
853 }
854
855 // We can emit a spurious reference only if the linkage implies that we'll
856 // be emitting a non-interposable symbol that will be retained until link
857 // time.
858 switch (cgf.cgm.getCIRLinkageVarDefinition(vd, /*IsConstant=*/false)) {
859 case cir::GlobalLinkageKind::ExternalLinkage:
860 case cir::GlobalLinkageKind::LinkOnceODRLinkage:
861 case cir::GlobalLinkageKind::WeakODRLinkage:
862 case cir::GlobalLinkageKind::InternalLinkage:
863 case cir::GlobalLinkageKind::PrivateLinkage:
864 return true;
865 default:
866 return false;
867 }
868}
869
// Build an lvalue for a DeclRefExpr: handles constant non-odr-uses,
// lambda-captured variables, globals/static data members, function-local
// variables from the local decl map, structured bindings, and functions.
871 const NamedDecl *nd = e->getDecl();
872 QualType ty = e->getType();
873
874 assert(e->isNonOdrUse() != NOUR_Unevaluated &&
875 "should not emit an unevaluated operand");
876
877 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
878 // Global Named registers access via intrinsics only
879 if (vd->getStorageClass() == SC_Register && vd->hasAttr<AsmLabelAttr>() &&
880 !vd->isLocalVarDecl()) {
881 cgm.errorNYI(e->getSourceRange(),
882 "emitDeclRefLValue: Global Named registers access");
883 return LValue();
884 }
885
// A non-odr-use of a constant: materialize the evaluated constant value
// rather than referencing the variable itself.
886 if (e->isNonOdrUse() == NOUR_Constant &&
887 (vd->getType()->isReferenceType() ||
888 !canEmitSpuriousReferenceToVariable(*this, e, vd))) {
889 vd->getAnyInitializer(vd);
890 mlir::Attribute val = ConstantEmitter(*this).emitAbstract(
891 e->getLocation(), *vd->evaluateValue(), vd->getType());
892 assert(val && "failed to emit constant expression");
893
894 Address addr = Address::invalid();
895 if (!vd->getType()->isReferenceType()) {
896 // Spill the constant value to a global.
897 addr = cgm.createUnnamedGlobalFrom(*vd, val,
898 getContext().getDeclAlign(vd));
899 mlir::Type varTy = getTypes().convertTypeForMem(vd->getType());
900 auto ptrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
901 if (ptrTy.getPointee() != varTy) {
902 addr = addr.withElementType(builder, varTy);
903 }
904 } else {
905 // Should we be using the alignment of the constant pointer we emitted?
906 CharUnits alignment = cgm.getNaturalTypeAlignment(
907 e->getType(), /*baseInfo=*/nullptr, /*forPointeeType=*/true);
908 // Classic codegen passes TBAA as null-ptr to the above function, so it
909 // probably needs to deal with that.
911 mlir::Value ptrVal = getBuilder().getConstant(
912 getLoc(e->getSourceRange()), mlir::cast<mlir::TypedAttr>(val));
913 addr = makeNaturalAddressForPointer(ptrVal, ty, alignment);
914 }
915 return makeAddrLValue(addr, ty, AlignmentSource::Decl);
916 }
917
918 // Check for captured variables.
920 vd = vd->getCanonicalDecl();
921 if (FieldDecl *fd = lambdaCaptureFields.lookup(vd))
922 return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
925 }
926 }
927
928 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
929 // Checks for omitted feature handling
936
937 // Check if this is a global variable
938 if (vd->hasLinkage() || vd->isStaticDataMember())
939 return emitGlobalVarDeclLValue(*this, e, vd);
940
941 Address addr = Address::invalid();
942
943 // The variable should generally be present in the local decl map.
944 auto iter = localDeclMap.find(vd);
945 if (iter != localDeclMap.end()) {
946 addr = iter->second;
947 } else {
948 // Otherwise, it might be static local we haven't emitted yet for some
949 // reason; most likely, because it's in an outer function.
950 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: static local");
951 }
952
953 // Drill into reference types.
954 LValue lv =
955 vd->getType()->isReferenceType()
959
960 // Statics are defined as globals, so they are not include in the function's
961 // symbol table.
962 assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
963 "non-static locals should be already mapped");
964
965 return lv;
966 }
967
968 if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
971 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
972 return LValue();
973 }
974 return emitLValue(bd->getBinding());
975 }
976
977 if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
978 LValue lv = emitFunctionDeclLValue(*this, e, fd);
979
980 // Emit debuginfo for the function declaration if the target wants to.
981 if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
983
984 return lv;
985 }
986
987 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
988 return LValue();
989}
990
  // The result of any truth-test is of type bool.
  QualType boolTy = getContext().BoolTy;
  SourceLocation loc = e->getExprLoc();

  // Member-pointer truth tests are not lowered yet; emit a diagnostic and a
  // placeholder bool so emission can continue.
  if (e->getType()->getAs<MemberPointerType>()) {
    cgm.errorNYI(e->getSourceRange(),
                 "evaluateExprAsBool: member pointer type");
    return createDummyValue(getLoc(loc), boolTy);
  }

  // Install the expression's floating-point options for the duration of the
  // emission below.
  CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
  if (!e->getType()->isAnyComplexType())
    return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);

  // Complex-typed operands are converted to bool on the complex path.
      loc);
}
1009
  UnaryOperatorKind op = e->getOpcode();

  // __extension__ doesn't affect lvalue-ness.
  if (op == UO_Extension)
    return emitLValue(e->getSubExpr());

  switch (op) {
  case UO_Deref: {
    // 't' is presumably the pointee type of the subexpression — declared just
    // above; TODO confirm against the full source.
    assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValueBaseInfo baseInfo;
    Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);

    // Tag 'load' with deref attribute.
    // FIXME: This misses some derefence cases and has problematic interactions
    // with other operators.
    if (auto loadOp = addr.getDefiningOp<cir::LoadOp>())
      loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));

    LValue lv = makeAddrLValue(addr, t, baseInfo);
    return lv;
  }
  case UO_Real:
  case UO_Imag: {
    LValue lv = emitLValue(e->getSubExpr());
    assert(lv.isSimple() && "real/imag on non-ordinary l-value");

    // __real is valid on scalars. This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (e->getOpcode() == UO_Real &&
        !mlir::isa<cir::ComplexType>(lv.getAddress().getElementType())) {
      assert(e->getSubExpr()->getType()->isArithmeticType());
      return lv;
    }

    QualType elemTy = exprTy->castAs<clang::ComplexType>()->getElementType();
    mlir::Location loc = getLoc(e->getExprLoc());
    // Project the address of the requested component out of the complex
    // l-value, then wrap it as an element l-value carrying the original
    // qualifiers.
    Address component =
        e->getOpcode() == UO_Real
            ? builder.createComplexRealPtr(loc, lv.getAddress())
            : builder.createComplexImagPtr(loc, lv.getAddress());
    LValue elemLV = makeAddrLValue(component, elemTy);
    elemLV.getQuals().addQualifiers(lv.getQuals());
    return elemLV;
  }
  case UO_PreInc:
  case UO_PreDec: {
    // Pre-inc/dec emits the update and yields the updated l-value itself.
    LValue lv = emitLValue(e->getSubExpr());

    assert(e->isPrefix() && "Prefix operator in unexpected state!");

    if (e->getType()->isAnyComplexType())
    else

    return lv;
  }
  case UO_Extension:
    llvm_unreachable("UnaryOperator extension should be handled above!");
  case UO_Plus:
  case UO_Minus:
  case UO_Not:
  case UO_LNot:
  case UO_AddrOf:
  case UO_PostInc:
  case UO_PostDec:
  case UO_Coawait:
    llvm_unreachable("UnaryOperator of non-lvalue kind!");
  }
  llvm_unreachable("Unknown unary operator kind!");
}
1089
1090/// If the specified expr is a simple decay from an array to pointer,
1091/// return the array subexpression.
1092/// FIXME: this could be abstracted into a common AST helper.
1093static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
1094 // If this isn't just an array->pointer decay, bail out.
1095 const auto *castExpr = dyn_cast<CastExpr>(e);
1096 if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
1097 return nullptr;
1098
1099 // If this is a decay from variable width array, bail out.
1100 const Expr *subExpr = castExpr->getSubExpr();
1101 if (subExpr->getType()->isVariableArrayType())
1102 return nullptr;
1103
1104 return subExpr;
1105}
1106
1107static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
1108 // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr?
1109 if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
1110 return constantOp.getValueAttr<cir::IntAttr>();
1111 return {};
1112}
1113
1114static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
1115 CharUnits eltSize) {
1116 // If we have a constant index, we can use the exact offset of the
1117 // element we're accessing.
1118 if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
1119 const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
1120 return arrayAlign.alignmentAtOffset(offset);
1121 }
1122 // Otherwise, use the worst-case alignment for any element.
1123 return arrayAlign.alignmentOfArrayElement(eltSize);
1124}
1125
                                        const VariableArrayType *vla) {
  // Peel off nested variable-array levels until the element type is no longer
  // a VLA; that innermost element type has a statically known size.
  QualType eltType;
  do {
    eltType = vla->getElementType();
  } while ((vla = astContext.getAsVariableArrayType(eltType)));
  return eltType;
}
1134
                                         mlir::Location beginLoc,
                                         mlir::Location endLoc, mlir::Value ptr,
                                         mlir::Type eltTy, mlir::Value idx,
                                         bool shouldDecay) {
  CIRGenModule &cgm = cgf.getCIRGenModule();
  // TODO(cir): LLVM codegen emits in bound gep check here, is there anything
  // that would enhance tracking this later in CIR?
  // Delegate the element-address computation (and optional array-to-pointer
  // decay) to the CIR builder.
  return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
                                          shouldDecay);
}
1147
                                     mlir::Location beginLoc,
                                     mlir::Location endLoc, Address addr,
                                     QualType eltType, mlir::Value idx,
                                     mlir::Location loc, bool shouldDecay) {

  // Determine the element size of the statically-sized base. This is
  // the thing that the indices are expressed in terms of.
  if (const VariableArrayType *vla =
          cgf.getContext().getAsVariableArrayType(eltType)) {
    eltType = getFixedSizeElementType(cgf.getContext(), vla);
  }

  // We can use that to compute the best alignment of the element.
  const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
  const CharUnits eltAlign =
      getArrayElementAlign(addr.getAlignment(), idx, eltSize);

  // Form the raw element pointer, then rewrap it with the element's memory
  // type and the alignment computed above.
  const mlir::Value eltPtr =
      emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
                            addr.getElementType(), idx, shouldDecay);
  const mlir::Type elementType = cgf.convertTypeForMem(eltType);
  return Address(eltPtr, elementType, eltAlign);
}
1173
LValue
  // Objective-C object subscripting is not implemented yet.
  if (e->getType()->getAs<ObjCObjectType>()) {
    cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
  }

  // The index must always be an integer, which is not an aggregate. Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
         "index was neither LHS nor RHS");

  auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
    const mlir::Value idx = emitScalarExpr(e->getIdx());

    // Extend or truncate the index type to 32 or 64-bits.
    auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
    if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
      cgm.errorNYI(e->getSourceRange(),
                   "emitArraySubscriptExpr: index type cast");
    return idx;
  };

  // If the base is a vector type, then we are forming a vector element
  // with this subscript.
  if (e->getBase()->getType()->isSubscriptableVectorType() &&
    const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
    const LValue lv = emitLValue(e->getBase());
    return LValue::makeVectorElt(lv.getAddress(), idx, e->getBase()->getType(),
                                 lv.getBaseInfo());
  }

  // The HLSL runtime handles subscript expressions on global resource arrays
  // and objects with HLSL buffer layouts.
  if (getLangOpts().HLSL) {
    cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: HLSL");
    return {};
  }

  mlir::Value idx = emitIdxAfterBase(/*promote=*/true);

  // Handle the extvector case we ignored above.
    const LValue lv = emitLValue(e->getBase());
    Address addr = emitExtVectorElementLValue(lv, cgm.getLoc(e->getExprLoc()));

    QualType elementType = lv.getType()->castAs<VectorType>()->getElementType();
    addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
                                 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
                                 idx, cgm.getLoc(e->getExprLoc()),
                                 /*shouldDecay=*/false);

    return makeAddrLValue(addr, elementType, lv.getBaseInfo());
  }

  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(e->getType())) {
    // The base must be a pointer, which is not an aggregate. Emit
    // it. It needs to be emitted first in case it's what captures
    // the VLA bounds.

    // The element count here is the total number of non-VLA elements.
    mlir::Value numElements = getVLASize(vla).numElts;
    idx = builder.createIntCast(idx, numElements.getType());

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    OverflowBehavior overflowBehavior = getLangOpts().PointerOverflowDefined
    idx = builder.createMul(cgm.getLoc(e->getExprLoc()), idx, numElements,
                            overflowBehavior);

    addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
                                 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
                                 idx, cgm.getLoc(e->getExprLoc()),
                                 /*shouldDecay=*/false);

    return makeAddrLValue(addr, vla->getElementType(), LValueBaseInfo());
  }

  // Simple array-to-pointer decay: subscript the underlying array directly so
  // the array's own alignment can be propagated to the element.
  if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
    LValue arrayLV;
    if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
      arrayLV = emitArraySubscriptExpr(ase);
    else
      arrayLV = emitLValue(array);

    // Propagate the alignment from the array itself to the result.
    const Address addr = emitArraySubscriptPtr(
        *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
        arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
        /*shouldDecay=*/true);

    const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());

    if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
      cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
    }

    return lv;
  }

  // The base must be a pointer; emit it with an estimate of its alignment.
  assert(e->getBase()->getType()->isPointerType() &&
         "The base must be a pointer");

  LValueBaseInfo eltBaseInfo;
  const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
  // Propagate the alignment from the array itself to the result.
  const Address addxr = emitArraySubscriptPtr(
      *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
      e->getType(), idx, cgm.getLoc(e->getExprLoc()),
      /*shouldDecay=*/false);

  const LValue lv = LValue::makeAddr(addxr, e->getType(), eltBaseInfo);

  if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
    cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
  }

  return lv;
}
1301
  // Emit the base vector as an l-value.
  LValue base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (e->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    LValueBaseInfo baseInfo;
    Address ptr = emitPointerWithAlignment(e->getBase(), &baseInfo);
    const auto *clangPtrTy =
    base = makeAddrLValue(ptr, clangPtrTy->getPointeeType(), baseInfo);
    base.getQuals().removeObjCGCAttr();
  } else if (e->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(e->getBase()->getType()->isVectorType());
    base = emitLValue(e->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(e->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    mlir::Value vec = emitScalarExpr(e->getBase());

    // Store the vector to memory (because LValue wants an address).
    QualType baseTy = e->getBase()->getType();
    Address vecMem = createMemTemp(baseTy, vec.getLoc(), "tmp");
    if (!getLangOpts().HLSL && baseTy->isExtVectorBoolType()) {
      cgm.errorNYI(e->getSourceRange(),
                   "emitExtVectorElementExpr: ExtVectorBoolType & !HLSL");
      return {};
    }
    builder.createStore(vec.getLoc(), vec, vecMem);
    base = makeAddrLValue(vecMem, baseTy, AlignmentSource::Decl);
  }

  QualType type =

  // Encode the element access list into a vector of unsigned indices.
  // NOTE(review): 'indices' is presumably declared just above — elided here.
  e->getEncodedElementAccess(indices);

  // A simple base lets us represent the access as an ext-vector-element
  // l-value carrying the index list as an array attribute.
  if (base.isSimple()) {
    SmallVector<int64_t> attrElts(indices.begin(), indices.end());
    mlir::ArrayAttr elts = builder.getI64ArrayAttr(attrElts);
    return LValue::makeExtVectorElt(base.getAddress(), elts, type,
                                    base.getBaseInfo());
  }

  cgm.errorNYI(e->getSourceRange(),
               "emitExtVectorElementExpr: isSimple is false");
  return {};
}
1357
                                               llvm::StringRef name) {
  // Materialize (or reuse) the global backing the string literal, then form
  // an l-value from its address and recorded alignment.
  cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e, name);
  assert(globalOp.getAlignment() && "expected alignment for string literal");
  unsigned align = *(globalOp.getAlignment());
  mlir::Value addr =
      builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
  return makeAddrLValue(
      Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
}
1369
/// Casts are never lvalues unless that cast is to a reference type. If the cast
/// is to a reference, we can have the usual lvalue result, otherwise if a cast
/// is needed by the code generator in an lvalue context, then it must mean that
/// we need the address of an aggregate in order to access one of its members.
/// This can happen for all the reasons that casts are permitted with aggregate
/// result, including noop aggregate casts, and cast from scalar to union.
  switch (e->getCastKind()) {
  // None of these cast kinds can produce an l-value.
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:
  case CK_HLSLMatrixTruncation:
  case CK_HLSLArrayRValue:
  case CK_HLSLElementwiseCast:
  case CK_HLSLAggregateSplatCast:
    llvm_unreachable("unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_Dynamic: {
    // dynamic_cast of an l-value: emit the operand, then perform the checked
    // cast on its address.
    LValue lv = emitLValue(e->getSubExpr());
    Address v = lv.getAddress();
    const auto *dce = cast<CXXDynamicCastExpr>(e);
  }

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
  case CK_ToUnion:
  case CK_ObjCObjectLValueCast:
  case CK_VectorSplat:
  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue: {
    cgm.errorNYI(e->getSourceRange(),
                 std::string("emitCastLValue for unhandled cast kind: ") +
                     e->getCastKindName());

    return {};
  }
  case CK_AddressSpaceConversion: {
    // Re-point the l-value's address into the destination address space while
    // keeping the original alignment and base info.
    LValue lv = emitLValue(e->getSubExpr());
    QualType destTy = getContext().getPointerType(e->getType());

    clang::LangAS srcLangAS = e->getSubExpr()->getType().getAddressSpace();
    mlir::ptr::MemorySpaceAttrInterface srcAS;
    if (clang::isTargetAddressSpace(srcLangAS))
      srcAS = cir::toCIRAddressSpaceAttr(getMLIRContext(), srcLangAS);
    else
      cgm.errorNYI(
          e->getSourceRange(),
          "emitCastLValue: address space conversion from unknown address "
          "space");

    mlir::Value v = performAddrSpaceCast(lv.getPointer(), convertType(destTy));

                          lv.getAddress().getAlignment()),
        e->getType(), lv.getBaseInfo());
  }

  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const auto *ce = cast<ExplicitCastExpr>(e);

    cgm.emitExplicitCastExprType(ce, this);
    LValue LV = emitLValue(e->getSubExpr());
        builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));

    return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
  }

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    LValue lv = emitLValue(e->getSubExpr());
    // Propagate the volatile qualifier to LValue, if exists in e.
      lv.getQuals() = e->getType().getQualifiers();
    if (lv.isSimple()) {
      Address v = lv.getAddress();
      if (v.isValid()) {
        mlir::Type ty = convertTypeForMem(e->getType());
        if (v.getElementType() != ty)
          cgm.errorNYI(e->getSourceRange(),
                       "emitCastLValue: NoOp needs bitcast");
      }
    }
    return lv;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();

    LValue lv = emitLValue(e->getSubExpr());
    Address thisAddr = lv.getAddress();

    // Perform the derived-to-base conversion
    Address baseAddr =
        getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
                              /*NullCheckValue=*/false, e->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
  }

  case CK_BaseToDerived: {
    const auto *derivedClassDecl = e->getType()->castAsCXXRecordDecl();
    LValue lv = emitLValue(e->getSubExpr());

    // Perform the base-to-derived conversion
        getLoc(e->getSourceRange()), lv.getAddress(), derivedClassDecl,
        e->path(), /*NullCheckValue=*/false);
    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.

    return makeAddrLValue(derived, e->getType(), lv.getBaseInfo());
  }

  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
  }

  llvm_unreachable("Invalid cast kind");
}
1553
                                                         const MemberExpr *me) {
  // Only variable members (e.g. static data members) can be modeled as a
  // DeclRefExpr; fields and member functions fall through to nullptr.
  if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
        me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
  }
  return nullptr;
}
1565
  // Static members reachable as DeclRefExprs take the simpler DRE path.
  if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
    return emitDeclRefLValue(dre);
  }

  Expr *baseExpr = e->getBase();
  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
  LValue baseLV;
  if (e->isArrow()) {
    LValueBaseInfo baseInfo;
    Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
    QualType ptrTy = baseExpr->getType()->getPointeeType();
    baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
  } else {
    baseLV = emitLValue(baseExpr);
  }

  const NamedDecl *nd = e->getMemberDecl();
  if (auto *field = dyn_cast<FieldDecl>(nd)) {
    LValue lv = emitLValueForField(baseLV, field);
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
    }
    return lv;
  }

  if (isa<FunctionDecl>(nd)) {
    cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: FunctionDecl");
    return LValue();
  }

  llvm_unreachable("Unhandled member declaration!");
}
1607
/// Evaluate an expression into a given memory location.
                                      Qualifiers quals, bool isInit) {
  // FIXME: This function should take an LValue as an argument.
  // Dispatch on the CIR evaluation kind of the expression's type.
  switch (getEvaluationKind(e->getType())) {
  case cir::TEK_Complex: {
    LValue lv = makeAddrLValue(location, e->getType());
    emitComplexExprIntoLValue(e, lv, isInit);
    return;
  }

  case cir::TEK_Aggregate: {
    // Aggregates are emitted directly into the destination slot.
    emitAggExpr(e, AggValueSlot::forAddr(location, quals,
    return;
  }

  case cir::TEK_Scalar: {
    // 'rv' is presumably the scalar RValue of 'e' emitted just above — TODO
    // confirm against the full source.
    LValue lv = makeAddrLValue(location, e->getType());
    emitStoreThroughLValue(rv, lv);
    return;
  }
  }

  llvm_unreachable("bad evaluation kind");
}
1637
                                        const MaterializeTemporaryExpr *m,
                                        const Expr *inner) {
  // TODO(cir): cgf.getTargetHooks();
  switch (m->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    QualType ty = inner->getType();

    // The temporary memory should be created in the same scope as the extending
    // declaration of the temporary materialization expression.
    cir::AllocaOp extDeclAlloca;
    if (const ValueDecl *extDecl = m->getExtendingDecl()) {
      auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
      if (extDeclAddrIter != cgf.localDeclMap.end())
        extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
    }
    // If the extending declaration has an alloca, insert the temporary's
    // alloca right at that point so both share the same scope.
    mlir::OpBuilder::InsertPoint ip;
    if (extDeclAlloca)
      ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
    return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
                             cgf.getCounterRefTmpAsString(), /*alloca=*/nullptr,
                             ip);
  }
  case SD_Thread:
  case SD_Static: {
    // Static/thread-lifetime temporaries are backed by a global.
    auto addr =
        mlir::cast<cir::GlobalOp>(cgf.cgm.getAddrOfGlobalTemporary(m, inner));
    auto getGlobal = cgf.cgm.getBuilder().createGetGlobal(addr);
    assert(addr.getAlignment().has_value() &&
           "This should always have an alignment");
    return Address(getGlobal,
                   clang::CharUnits::fromQuantity(addr.getAlignment().value()));
  }

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}
1680
                                 const MaterializeTemporaryExpr *m,
                                 const Expr *e, Address referenceTemporary) {
  // Objective-C++ ARC:
  // If we are binding a reference to a temporary that has ownership, we
  // need to perform retain/release operations on the temporary.
  //
  // FIXME(ogcg): This should be looking at e, not m.
  if (m->getType().getObjCLifetime()) {
    cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
    return;
  }

  // 'dk' is presumably the destruction kind of the temporary's type, declared
  // just above — TODO confirm against the full source. No destruction needed
  // means no cleanup to push.
  if (dk == QualType::DK_none)
    return;

  switch (m->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    CXXDestructorDecl *referenceTemporaryDtor = nullptr;
    if (const auto *classDecl =
        classDecl && !classDecl->hasTrivialDestructor())
      // Get the destructor for the reference temporary.
      referenceTemporaryDtor = classDecl->getDestructor();

    // Trivially-destructible temporaries need no registered cleanup.
    if (!referenceTemporaryDtor)
      return;

    cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
                                          "storage duration with destructors");
    break;
  }

  case SD_FullExpression:
    cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
    break;

  case SD_Automatic:
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "pushTemporaryCleanup: automatic storage duration");
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}
1730
    const MaterializeTemporaryExpr *m) {
  const Expr *e = m->getSubExpr();

  assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
          !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = m->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitMaterializeTemporaryExpr: ObjCLifetime");
    return {};
  }

  // Strip comma expressions and subobject adjustments off the initializer.
  // 'commaLHSs'/'adjustments' are presumably declared just above — elided in
  // this view.
  e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);

  for (const Expr *ignored : commaLHSs)
    emitIgnoredExpr(ignored);

  if (isa<OpaqueValueExpr>(e)) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitMaterializeTemporaryExpr: OpaqueValueExpr");
    return {};
  }

  // Create and initialize the reference temporary.
  Address object = createReferenceTemporary(*this, m, e);

  if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
    // TODO(cir): add something akin to stripPointerCasts() to ptr above
    cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
    return {};
  } else {
    emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
  }
  // Register the destructor/cleanup appropriate for the storage duration.
  pushTemporaryCleanup(*this, m, e, object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  if (!adjustments.empty()) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitMaterializeTemporaryExpr: Adjustments");
    return {};
  }

  return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
}
1786
LValue

  // Return the previously registered l-value binding for this OVE, if any.
  auto it = opaqueLValues.find(e);
  if (it != opaqueLValues.end())
    return it->second;

  // No binding: only legal for unique OVEs, which can be emitted on demand
  // from their source expression.
  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
  return emitLValue(e->getSourceExpr());
}
1798
RValue

  // Return the previously registered r-value binding for this OVE, if any.
  auto it = opaqueRValues.find(e);
  if (it != opaqueRValues.end())
    return it->second;

  // No binding: only legal for unique OVEs, which can be emitted on demand
  // from their source expression.
  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
  return emitAnyExpr(e->getSourceExpr());
}
1810
  // File-scope compound literals are globals; not implemented yet.
  if (e->isFileScope()) {
    cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
    return {};
  }

  if (e->getType()->isVariablyModifiedType())

  // Materialize the literal into a local temporary and initialize it in place.
  Address declPtr = createMemTemp(e->getType(), getLoc(e->getSourceRange()),
                                  ".compoundliteral");
  const Expr *initExpr = e->getInitializer();
  LValue result = makeAddrLValue(declPtr, e->getType(), AlignmentSource::Decl);

  emitAnyExprToMem(initExpr, declPtr, e->getType().getQualifiers(),
                   /*Init*/ true);

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  if (!getLangOpts().CPlusPlus && e->getType().isDestructedType()) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitCompoundLiteralLValue: non C++ DestructedType");
    return {};
  }

  return result;
}
1838
  RValue rv = emitCallExpr(e);

  // Only scalar (reference-typed) returns can currently be turned into an
  // l-value.
  if (!rv.isScalar()) {
    cgm.errorNYI(e->getSourceRange(), "emitCallExprLValue: non-scalar return");
    return {};
  }

  assert(e->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  // The returned reference is wrapped into an l-value here (line elided in
  // this view).
}
1853
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (e->getOpcode() == BO_Comma) {
    emitIgnoredExpr(e->getLHS());
    return emitLValue(e->getRHS());
  }

  if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI)

  // Past this point only assignments can yield an l-value.
  assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  case cir::TEK_Scalar: {
    if (e->getLHS()->getType().getObjCLifetime() !=
      cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
      return {};
    }

    // RHS before LHS, matching the C/C++ assignment emission order used here.
    RValue rv = emitAnyExpr(e->getRHS());
    LValue lv = emitLValue(e->getLHS());

    SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
    if (lv.isBitField())
    else
      emitStoreThroughLValue(rv, lv);

    if (getLangOpts().OpenMP) {
      cgm.errorNYI(e->getSourceRange(), "openmp");
      return {};
    }

    return lv;
  }

  case cir::TEK_Complex: {
  }

  case cir::TEK_Aggregate:
    cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
    return {};
  }
  llvm_unreachable("bad evaluation kind");
}
1905
/// Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
                                   bool ignoreResult) {
  case cir::TEK_Scalar:
    return RValue::get(emitScalarExpr(e, ignoreResult));
  case cir::TEK_Complex:
  case cir::TEK_Aggregate: {
    // Aggregates need a destination slot; create a temporary one when the
    // caller did not supply a slot and the result is actually wanted.
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
    emitAggExpr(e, aggSlot);
    return aggSlot.asRValue();
  }
  }
  llvm_unreachable("bad evaluation kind");
}
1925
// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
  // Walk the redeclaration chain: any declaration that is not an
  // inline-builtin declaration means an external version shadows it.
  for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
    if (!pd->isInlineBuiltinDeclaration())
      return false;
  return true;
}
1935
/// Resolve the callee for a direct call to the function named by \p gd.
/// Builtins are normally emitted as builtins; when the only declarations of
/// a builtin are inline definitions, the call is redirected to an internal
/// "<mangled-name>.inline" clone so the real builtin name stays available.
/// On the host side of CUDA/HIP, calls to kernels go through the kernel stub.
1936CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
1937 const auto *fd = cast<FunctionDecl>(gd.getDecl());
1938
1939 if (unsigned builtinID = fd->getBuiltinID()) {
1940 StringRef ident = cgm.getMangledName(gd);
1941 std::string fdInlineName = (ident + ".inline").str();
1942
1943 bool isPredefinedLibFunction =
1944 cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
1945 // TODO: Read no-builtin function attribute and set this accordingly.
1946 // Using false here matches OGCG's default behavior - builtins are called
1947 // as builtins unless explicitly disabled. The previous value of true was
1948 // overly conservative and caused functions to be marked as no_inline when
1949 // they shouldn't be.
1950 bool hasAttributeNoBuiltin = false;
1952
1953 // When directly calling an inline builtin, call it through its mangled
1954 // name to make it clear it's not the actual builtin.
1955 auto fn = cast<cir::FuncOp>(curFn);
// Guard against self-recursion: do not redirect when we are already
// emitting the ".inline" clone itself.
1956 if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
1957 cir::FuncOp clone =
1958 mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
1959
1960 if (!clone) {
1961 // Create a forward declaration - the body will be generated in
1962 // generateCode when the function definition is processed
1963 cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
1964 mlir::OpBuilder::InsertionGuard guard(builder);
1965 builder.setInsertionPointToStart(cgm.getModule().getBody());
1966
1967 clone = cir::FuncOp::create(builder, calleeFunc.getLoc(), fdInlineName,
1968 calleeFunc.getFunctionType());
1969 clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
1970 &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
1971 clone.setSymVisibility("private");
1972 clone.setInlineKind(cir::InlineKind::AlwaysInline);
1973 }
1974 return CIRGenCallee::forDirect(clone, gd);
1975 }
1976
1977 // Replaceable builtins provide their own implementation of a builtin. If we
1978 // are in an inline builtin implementation, avoid trivial infinite
1979 // recursion. Honor __attribute__((no_builtin("foo"))) or
1980 // __attribute__((no_builtin)) on the current function unless foo is
1981 // not a predefined library function which means we must generate the
1982 // builtin no matter what.
1983 else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
1984 return CIRGenCallee::forBuiltin(builtinID, fd);
1985 }
1986
1987 cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);
1988
// Host-side CUDA/HIP: a call to a __global__ kernel must be routed through
// the stub that the CUDA runtime registers for the kernel handle.
1989 if ((cgm.getLangOpts().CUDA || cgm.getLangOpts().HIP) &&
1990 !cgm.getLangOpts().CUDAIsDevice && fd->hasAttr<CUDAGlobalAttr>()) {
1991 mlir::Operation *handle = cgm.getCUDARuntime().getKernelHandle(callee, gd);
1992 callee =
1993 mlir::cast<cir::FuncOp>(*cgm.getCUDARuntime().getKernelStub(handle));
1994 }
1995
1996 return CIRGenCallee::forDirect(callee, gd);
1997}
1998
// Produce an RValue standing for an undefined value of the given type.
// Only void is handled so far; any other type is reported NYI and a null
// RValue is returned as a placeholder.
2000 if (ty->isVoidType())
2001 return RValue::get(nullptr);
2002
2003 cgm.errorNYI("unsupported type for undef rvalue");
2004 return RValue::get(nullptr);
2005}
2006
/// Emit a call through a function-pointer-typed callee: arranges the call
/// arguments, handles the unprototyped (K&R) callee case by bitcasting the
/// function pointer to the exact promoted-argument type, and dispatches to
/// the lower-level emitCall overload.
2008 const CIRGenCallee &origCallee,
2009 const clang::CallExpr *e,
2011 // Get the actual function type. The callee type will always be a pointer to
2012 // function type or a block pointer type.
2013 assert(calleeTy->isFunctionPointerType() &&
2014 "Callee must have function pointer type!");
2015
2016 calleeTy = getContext().getCanonicalType(calleeTy);
2017 auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
2018
2019 CIRGenCallee callee = origCallee;
2020
2021 if (getLangOpts().CPlusPlus)
2023
2024 const auto *fnType = cast<FunctionType>(pointeeTy);
2025
2027
2028 CallArgList args;
2030
2031 emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
2032 e->getDirectCallee());
2033
2034 const CIRGenFunctionInfo &funcInfo =
2035 cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
2036
2037 // C99 6.5.2.2p6:
2038 // If the expression that denotes the called function has a type that does
2039 // not include a prototype, [the default argument promotions are performed].
2040 // If the number of arguments does not equal the number of parameters, the
2041 // behavior is undefined. If the function is defined with a type that
2042 // includes a prototype, and either the prototype ends with an ellipsis (,
2043 // ...) or the types of the arguments after promotion are not compatible
2044 // with the types of the parameters, the behavior is undefined. If the
2045 // function is defined with a type that does not include a prototype, and
2046 // the types of the arguments after promotion are not compatible with those
2047 // of the parameters after promotion, the behavior is undefined [except in
2048 // some trivial cases].
2049 // That is, in the general case, we should assume that a call through an
2050 // unprototyped function type works like a *non-variadic* call. The way we
2051 // make this work is to cast to the exact type of the promoted arguments.
2052 if (isa<FunctionNoProtoType>(fnType)) {
2055 cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
2056 // get non-variadic function type
2057 calleeTy = cir::FuncType::get(calleeTy.getInputs(),
2058 calleeTy.getReturnType(), false);
2059 auto calleePtrTy = cir::PointerType::get(calleeTy);
2060
// A direct cir.func has no SSA result; take its address with
// cir.get_global before bitcasting. Indirect callees already have one.
2061 mlir::Operation *fn = callee.getFunctionPointer();
2062 mlir::Value addr;
2063 if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
2064 addr = cir::GetGlobalOp::create(
2065 builder, getLoc(e->getSourceRange()),
2066 cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
2067 } else {
2068 addr = fn->getResult(0);
2069 }
2070
2071 fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
2072 callee.setFunctionPointer(fn);
2073 }
2074
2076 assert(!cir::MissingFeatures::hip());
2078
2079 cir::CIRCallOpInterface callOp;
2080 RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
2081 getLoc(e->getExprLoc()));
2082
2084
2085 return callResult;
2086}
2087
// Compute the CIRGenCallee for the callee expression: look through parens
// and function-to-pointer decay, resolve DeclRefExpr/MemberExpr direct
// calls, and otherwise evaluate the expression as an indirect function
// pointer.
2089 e = e->IgnoreParens();
2090
2091 // Look through function-to-pointer decay.
2092 if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
2093 if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
2094 implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
2095 return emitCallee(implicitCast->getSubExpr());
2096 }
2097 // When performing an indirect call through a function pointer lvalue, the
2098 // function pointer lvalue is implicitly converted to an rvalue through an
2099 // lvalue-to-rvalue conversion.
2100 assert(implicitCast->getCastKind() == CK_LValueToRValue &&
2101 "unexpected implicit cast on function pointers");
2102 } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
2103 // Resolve direct calls.
2104 const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
2105 return emitDirectCallee(funcDecl);
2106 } else if (auto me = dyn_cast<MemberExpr>(e)) {
2107 if (const auto *fd = dyn_cast<FunctionDecl>(me->getMemberDecl())) {
// The base is evaluated only for its side effects; the call is direct.
2108 emitIgnoredExpr(me->getBase());
2109 return emitDirectCallee(fd);
2110 }
2111 // Else fall through to the indirect reference handling below.
2112 } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
2114 }
2115
2116 // Otherwise, we have an indirect reference.
2117 mlir::Value calleePtr;
2119 if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
2120 calleePtr = emitScalarExpr(e);
2121 functionType = ptrType->getPointeeType();
2122 } else {
2123 functionType = e->getType();
2124 calleePtr = emitLValue(e).getPointer();
2125 }
2126 assert(functionType->isFunctionType());
2127
// Remember the referenced variable (if any) so attributes attached to the
// declaration can be honored at the call site.
2128 GlobalDecl gd;
2129 if (const auto *vd =
2130 dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
2131 gd = GlobalDecl(vd);
2132
2133 CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
2134 CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
2135 return callee;
2136}
2137
// Emit a call expression: dispatch to the specialized emitters for C++
// member calls, CUDA kernel calls, operator member calls, builtins and
// pseudo-destructors before falling back to the generic call path.
2141
2142 if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
2144
2145 if (const auto *cudaKernelCallExpr = dyn_cast<CUDAKernelCallExpr>(e))
2147
2148 if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
2149 // If the callee decl is a CXXMethodDecl, we need to emit this as a C++
2150 // operator member call.
2151 if (const CXXMethodDecl *md =
2152 dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
2153 return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
2154 // A CXXOperatorCallExpr is created even for explicit object methods, but
2155 // these should be treated like static function calls. Fall through to do
2156 // that.
2157 }
2158
2159 CIRGenCallee callee = emitCallee(e->getCallee());
2160
2161 if (callee.isBuiltin())
2162 return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
2163 returnValue);
2164
2165 if (callee.isPseudoDestructor())
2167
2168 return emitCall(e->getCallee()->getType(), callee, e, returnValue);
2169}
2170
2171/// Emit code to compute the specified expression, ignoring the result.
// PRValues may still have side effects, so they are evaluated into an
// ignored aggregate slot with ignoreResult set.
2173 if (e->isPRValue()) {
2174 emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
2175 return;
2176 }
2177
2178 // Just emit it as an l-value and drop the result.
2179 emitLValue(e);
2180}
2181
// Emit the address produced by array-to-pointer decay of expression `e`,
// asserting (in debug builds) that the lvalue's pointee type matches the
// converted array type before building the decay cast.
2183 LValueBaseInfo *baseInfo) {
2185 assert(e->getType()->isArrayType() &&
2186 "Array to pointer decay must have array source type!");
2187
2188 // Expressions of array type can't be bitfields or vector elements.
2189 LValue lv = emitLValue(e);
2190 Address addr = lv.getAddress();
2191
2192 // If the array type was an incomplete type, we need to make sure
2193 // the decay ends up being the right type.
2194 auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
2195
// VLAs already decay to a pointer to the element; nothing more to do.
2196 if (e->getType()->isVariableArrayType())
2197 return addr;
2198
2199 [[maybe_unused]] auto pointeeTy =
2200 mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
2201
2202 [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
2203 assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
2204 assert(pointeeTy == arrayTy);
2205
2206 // The result of this decay conversion points to an array element within the
2207 // base lvalue. However, since TBAA currently does not support representing
2208 // accesses to elements of member arrays, we conservatively represent accesses
2209 // to the pointee object as if it had no base lvalue specified.
2210 // TODO: Support TBAA for member arrays.
2213
2214 mlir::Value ptr = builder.maybeBuildArrayDecay(
2215 cgm.getLoc(e->getSourceRange()), addr.getPointer(),
2216 convertTypeForMem(eltType));
2217 return Address(ptr, addr.getAlignment());
2218}
2219
2220/// Given the address of a temporary variable, produce an r-value of its type.
// Loads through the lvalue according to the type's CIR evaluation kind;
// aggregates are returned by address rather than loaded.
2224 switch (getEvaluationKind(type)) {
2225 case cir::TEK_Complex:
2226 return RValue::getComplex(emitLoadOfComplex(lvalue, loc));
2227 case cir::TEK_Aggregate:
2228 return lvalue.asAggregateRValue();
2229 case cir::TEK_Scalar:
2230 return RValue::get(emitLoadOfScalar(lvalue, loc));
2231 }
2232 llvm_unreachable("bad evaluation kind");
2233}
2234
2235/// Emit an `if` on a boolean condition, filling `then` and `else` into
2236/// appropriated regions.
// Statement-level overload: wraps each branch statement in its own lexical
// scope and combines the per-branch emission results.
2237mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
2238 const Stmt *thenS,
2239 const Stmt *elseS) {
2240 mlir::Location thenLoc = getLoc(thenS->getSourceRange());
2241 std::optional<mlir::Location> elseLoc;
2242 if (elseS)
2243 elseLoc = getLoc(elseS->getSourceRange());
2244
2245 mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
2247 cond, /*thenBuilder=*/
2248 [&](mlir::OpBuilder &, mlir::Location) {
2249 LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
2250 resThen = emitStmt(thenS, /*useCurrentScope=*/true);
2251 },
2252 thenLoc,
2253 /*elseBuilder=*/
2254 [&](mlir::OpBuilder &, mlir::Location) {
2255 assert(elseLoc && "Invalid location for elseS.");
2256 LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
2257 resElse = emitStmt(elseS, /*useCurrentScope=*/true);
2258 },
2259 elseLoc);
2260
// Succeed only if both branches emitted successfully.
2261 return mlir::LogicalResult::success(resThen.succeeded() &&
2262 resElse.succeeded());
2263}
2264
2265/// Emit an `if` on a boolean condition, filling `then` and `else` into
2266/// appropriated regions.
// Callback-based overload: builds the cir.if op directly from the supplied
// region builders.
2268 const clang::Expr *cond, BuilderCallbackRef thenBuilder,
2269 mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
2270 std::optional<mlir::Location> elseLoc) {
2271 // Attempt to be as accurate as possible with IfOp location, generate
2272 // one fused location that has either 2 or 4 total locations, depending
2273 // on else's availability.
2274 SmallVector<mlir::Location, 2> ifLocs{thenLoc};
2275 if (elseLoc)
2276 ifLocs.push_back(*elseLoc);
2277 mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);
2278
2279 // Emit the code with the fully general case.
2280 mlir::Value condV = emitOpOnBoolExpr(loc, cond);
2281 return cir::IfOp::create(builder, loc, condV, elseLoc.has_value(),
2282 /*thenBuilder=*/thenBuilder,
2283 /*elseBuilder=*/elseBuilder);
2284}
2285
2286/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
// Evaluate `cond` as a boolean value. Conditional operators are lowered to
// a cir.ternary whose result is converted to bool; everything else falls
// through to evaluateExprAsBool.
2287mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
2288 const Expr *cond) {
2291 cond = cond->IgnoreParens();
2292
2293 // In LLVM the condition is reversed here for efficient codegen.
2294 // This should be done in CIR prior to LLVM lowering, if we do now
2295 // we can make CIR based diagnostics misleading.
2296 // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
2298
2299 if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
2300 Expr *trueExpr = condOp->getTrueExpr();
2301 Expr *falseExpr = condOp->getFalseExpr();
2302 mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());
2303
2304 mlir::Value ternaryOpRes =
2305 cir::TernaryOp::create(
2306 builder, loc, condV, /*thenBuilder=*/
2307 [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
2308 mlir::Value lhs = emitScalarExpr(trueExpr);
2309 cir::YieldOp::create(b, loc, lhs);
2310 },
2311 /*elseBuilder=*/
2312 [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
2313 mlir::Value rhs = emitScalarExpr(falseExpr);
2314 cir::YieldOp::create(b, loc, rhs);
2315 })
2316 .getResult();
2317
// The arms are emitted with the expression's own type; convert the
// ternary result to bool for the caller.
2318 return emitScalarConversion(ternaryOpRes, condOp->getType(),
2319 getContext().BoolTy, condOp->getExprLoc());
2320 }
2321
2322 if (isa<CXXThrowExpr>(cond)) {
2323 cgm.errorNYI("NYI");
2324 return createDummyValue(loc, cond->getType());
2325 }
2326
2327 // If the branch has a condition wrapped by __builtin_unpredictable,
2328 // create metadata that specifies that the branch is unpredictable.
2329 // Don't bother if not optimizing because that metadata would not be used.
2331
2332 // Emit the code with the fully general case.
2333 return evaluateExprAsBool(cond);
2334}
2335
// Emit an alloca named `name` of type `ty`, placed either in the function
// entry block or the current lexical scope's entry block, then delegate to
// the insert-point-based overload.
2336mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2337 mlir::Location loc, CharUnits alignment,
2338 bool insertIntoFnEntryBlock,
2339 mlir::Value arraySize) {
2340 mlir::Block *entryBlock = insertIntoFnEntryBlock
2342 : curLexScope->getEntryBlock();
2343
2344 // If this is an alloca in the entry basic block of a cir.try and there's
2345 // a surrounding cir.scope, make sure the alloca ends up in the surrounding
2346 // scope instead. This is necessary in order to guarantee all SSA values are
2347 // reachable during cleanups.
2348 if (auto tryOp =
2349 llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
2350 if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
2351 entryBlock = &scopeOp.getScopeRegion().front();
2352 }
2353
2354 return emitAlloca(name, ty, loc, alignment,
2355 builder.getBestAllocaInsertPoint(entryBlock), arraySize);
2356}
2357
// Emit a cir.alloca at the given insertion point and return its result
// value. The insertion guard restores the builder's previous position.
2358mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2359 mlir::Location loc, CharUnits alignment,
2360 mlir::OpBuilder::InsertPoint ip,
2361 mlir::Value arraySize) {
2362 // CIR uses its own alloca address space rather than follow the target data
2363 // layout like original CodeGen. The data layout awareness should be done in
2364 // the lowering pass instead.
2365 cir::PointerType localVarPtrTy =
2367 mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);
2368
2369 mlir::Value addr;
2370 {
2371 mlir::OpBuilder::InsertionGuard guard(builder);
2372 builder.restoreInsertionPoint(ip);
2373 addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
2374 /*var type*/ ty, name, alignIntAttr, arraySize);
2376 }
2377 return addr;
2378}
2379
2380// Note: this function also emits constructor calls to support an MSVC
2381// extension allowing explicit constructor function call.
// Unwraps the member expression, rejects static methods (NYI), and forwards
// the call pieces to emitCXXMemberOrOperatorMemberCallExpr.
2384 const Expr *callee = ce->getCallee()->IgnoreParens();
2385
2386 if (isa<BinaryOperator>(callee))
2388
2389 const auto *me = cast<MemberExpr>(callee);
2390 const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());
2391
2392 if (md->isStatic()) {
2393 cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
2394 return RValue::get(nullptr);
2395 }
2396
2397 bool hasQualifier = me->hasQualifier();
2398 NestedNameSpecifier qualifier = me->getQualifier();
2399 bool isArrow = me->isArrow();
2400 const Expr *base = me->getBase();
2401
2403 ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
2404}
2405
// Bind a reference to the expression: the expression is emitted as a simple
// lvalue and its address is returned as the RValue.
// NOTE(review): the signature is not visible in this listing — presumably
// this is emitReferenceBindingToExpr; verify against the full source.
2407 // Emit the expression as an lvalue.
2408 LValue lv = emitLValue(e);
2409 assert(lv.isSimple());
2410 mlir::Value value = lv.getPointer();
2411
2413
2414 return RValue::get(value);
2415}
2416
// Load the pointer stored in a reference lvalue and return the pointee's
// address, computing its natural alignment (and base info, if requested).
2418 LValueBaseInfo *pointeeBaseInfo) {
2419 if (refLVal.isVolatile())
2420 cgm.errorNYI(loc, "load of volatile reference");
2421
2422 cir::LoadOp load =
2423 cir::LoadOp::create(builder, loc, refLVal.getAddress().getElementType(),
2424 refLVal.getAddress().getPointer());
2425
2427
2428 QualType pointeeType = refLVal.getType()->getPointeeType();
2429 CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
2430 return Address(load, convertTypeForMem(pointeeType), align);
2431}
2432
// Wrap a reference-typed address in an LValue, dereference it, and return
// an LValue for the pointee carrying the computed base info.
2434 mlir::Location loc,
2435 QualType refTy,
2436 AlignmentSource source) {
2437 LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
2438 LValueBaseInfo pointeeBaseInfo;
2440 Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
2441 return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
2442 pointeeBaseInfo);
2443}
2444
2445void CIRGenFunction::emitTrap(mlir::Location loc, bool createNewBlock) {
2446 cir::TrapOp::create(builder, loc);
2447 if (createNewBlock)
2448 builder.createBlock(builder.getBlock()->getParent());
2449}
2450
// Emit a cir.unreachable terminator; optionally start a fresh block so any
// code emitted afterwards does not follow the terminator.
2452 bool createNewBlock) {
2454 cir::UnreachableOp::create(builder, getLoc(loc));
2455 if (createNewBlock)
2456 builder.createBlock(builder.getBlock()->getParent());
2457}
2458
2459mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
2460 clang::QualType qt) {
2461 mlir::Type t = convertType(qt);
2462 CharUnits alignment = getContext().getTypeAlignInChars(qt);
2463 return builder.createDummyValue(loc, t, alignment);
2464}
2465
2466//===----------------------------------------------------------------------===//
2467// CIR builder helpers
2468//===----------------------------------------------------------------------===//
2469
// Convenience overload: create a memory temporary aligned to the type's
// ABI alignment from the AST context.
2471 const Twine &name, Address *alloca,
2472 mlir::OpBuilder::InsertPoint ip) {
2473 // FIXME: Should we prefer the preferred type alignment here?
2474 return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
2475 alloca, ip);
2476}
2477
// Create a memory temporary of the given type and alignment via
// createTempAlloca. Constant matrix types would need an array-to-matrix
// pointer cast, which is still NYI.
2479 mlir::Location loc, const Twine &name,
2480 Address *alloca,
2481 mlir::OpBuilder::InsertPoint ip) {
2482 Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
2483 /*ArraySize=*/nullptr, alloca, ip);
2484 if (ty->isConstantMatrixType()) {
2486 cgm.errorNYI(loc, "temporary matrix value");
2487 }
2488 return result;
2489}
2490
2491/// This creates an alloca and inserts it into the entry block of the
2492/// current region.
// When an insertion point is provided the alloca is placed there; otherwise
// it goes to the entry block. No address-space cast is applied here.
2494 mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
2495 mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
2496 cir::AllocaOp alloca = ip.isSet()
2497 ? createTempAlloca(ty, loc, name, ip, arraySize)
2498 : createTempAlloca(ty, loc, name, arraySize);
2499 alloca.setAlignmentAttr(cgm.getSize(align));
2500 return Address(alloca, ty, align);
2501}
2502
2503/// This creates an alloca and inserts it into the entry block. The alloca
2504/// is cast to the default address space if necessary.
2505// TODO(cir): Implement address space casting to match classic codegen's
2506// CreateTempAlloca behavior with DestLangAS parameter
2508 mlir::Location loc, const Twine &name,
2509 mlir::Value arraySize,
2510 Address *allocaAddr,
2511 mlir::OpBuilder::InsertPoint ip) {
2512 Address alloca =
2513 createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
// Optionally report the raw (uncast) alloca address to the caller.
2514 if (allocaAddr)
2515 *allocaAddr = alloca;
2516 mlir::Value v = alloca.getPointer();
2517 // Alloca always returns a pointer in alloca address space, which may
2518 // be different from the type defined by the language. For example,
2519 // in C++ the auto variables are in the default address space. Therefore
2520 // cast alloca to the default address space when necessary.
2521
2522 cir::PointerType dstTy;
2524 dstTy = builder.getPointerTo(ty, getCIRAllocaAddressSpace());
2525 else
2526 dstTy = builder.getPointerTo(ty, clang::LangAS::Default);
2527 v = performAddrSpaceCast(v, dstTy);
2528
2529 return Address(v, ty, align);
2530}
2531
2532/// This creates an alloca and inserts it into the entry block if \p ArraySize
2533/// is nullptr, otherwise inserts it at the current insertion point of the
2534/// builder.
2535cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2536 mlir::Location loc,
2537 const Twine &name,
2538 mlir::Value arraySize,
2539 bool insertIntoFnEntryBlock) {
2540 return mlir::cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
2541 insertIntoFnEntryBlock, arraySize)
2542 .getDefiningOp());
2543}
2544
2545/// This creates an alloca and inserts it into the provided insertion point
2546cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2547 mlir::Location loc,
2548 const Twine &name,
2549 mlir::OpBuilder::InsertPoint ip,
2550 mlir::Value arraySize) {
2551 assert(ip.isSet() && "Insertion point is not set");
2552 return mlir::cast<cir::AllocaOp>(
2553 emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
2554 .getDefiningOp());
2555}
2556
2557/// CreateDefaultAlignTempAlloca - This creates an alloca with the
2558/// default alignment of the corresponding LLVM type, which is *not*
2559/// guaranteed to be related in any way to the expected alignment of
2560/// an AST type that might have been lowered to Ty.
// Uses the data layout's ABI alignment for the CIR type itself.
2562 mlir::Location loc,
2563 const Twine &name) {
2564 CharUnits align =
2565 CharUnits::fromQuantity(cgm.getDataLayout().getABITypeAlign(ty));
2566 return createTempAlloca(ty, align, loc, name);
2567}
2568
2569/// Try to emit a reference to the given value without producing it as
2570/// an l-value. For many cases, this is just an optimization, but it avoids
2571/// us needing to emit global copies of variables if they're named without
2572/// triggering a formal use in a context where we can't emit a direct
2573/// reference to them, for instance if a block or lambda or a member of a
2574/// local class uses a const int variable or constexpr variable from an
2575/// enclosing function.
2576///
2577/// For named members of enums, this is the only way they are emitted.
2580 const ValueDecl *value = refExpr->getDecl();
2581
2582 // There is a lot more to do here, but for now only EnumConstantDecl is
2583 // supported.
2585
2586 // The value needs to be an enum constant or a constant variable.
2587 if (!isa<EnumConstantDecl>(value))
2588 return ConstantEmission();
2589
// Constant-evaluate the reference; bail out to normal lvalue emission on
// failure.
2590 Expr::EvalResult result;
2591 if (!refExpr->EvaluateAsRValue(result, getContext()))
2592 return ConstantEmission();
2593
2594 QualType resultType = refExpr->getType();
2595
2596 // As long as we're only handling EnumConstantDecl, there should be no
2597 // side-effects.
2598 assert(!result.HasSideEffects);
2599
2600 // Emit as a constant.
2601 // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
2602 // somewhat heavy refactoring...)
2603 mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
2604 refExpr->getLocation(), result.Val, resultType);
2605 mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
2606 assert(cstToEmit && "expected a typed attribute");
2607
2609
2610 return ConstantEmission::forValue(cstToEmit);
2611}
2612
// Member-expression overload: forwards to the DeclRefExpr overload when the
// member expression converts to a DeclRefExpr (conversion not visible in
// this listing — verify against the full source); otherwise no constant.
2616 return tryEmitAsConstant(dre);
2617 return ConstantEmission();
2618}
2619
// Materialize a previously computed ConstantEmission as a scalar cir
// constant. Reference-form constants are still NYI.
2621 const CIRGenFunction::ConstantEmission &constant, Expr *e) {
2622 assert(constant && "not a constant");
2623 if (constant.isReference()) {
2624 cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
2625 return {};
2626 }
2627 return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
2628}
2629
// Emit an lvalue for a PredefinedExpr (__func__ and friends): the backing
// global's name is derived from the current function's (de-prefixed) name.
2631 const StringLiteral *sl = e->getFunctionName();
2632 assert(sl != nullptr && "No StringLiteral name in PredefinedExpr");
2633 auto fn = cast<cir::FuncOp>(curFn);
2634 StringRef fnName = fn.getName();
// Strip the '\01' mangling-escape prefix if present.
2635 fnName.consume_front("\01");
2636 std::array<StringRef, 2> nameItems = {
2638 std::string gvName = llvm::join(nameItems, ".");
2639 if (isa_and_nonnull<BlockDecl>(curCodeDecl))
2640 cgm.errorNYI(e->getSourceRange(), "predefined lvalue in block");
2641
2642 return emitStringLiteralLValue(sl, gvName);
2643}
2644
2649
2650namespace {
2651// Handle the case where the condition is a constant evaluatable simple integer,
2652// which means we don't have to separately handle the true/false blocks.
// Returns std::nullopt when the condition does not constant-fold, or when
// the dead arm contains a label (it must still be emitted in that case).
2653std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
2655 const Expr *condExpr = e->getCond();
2656 llvm::APSInt condExprVal;
2657 if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
2658 return std::nullopt;
2659
2660 const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
2661 if (!condExprVal.getBoolValue())
2662 std::swap(live, dead);
2663
2664 if (cgf.containsLabel(dead))
2665 return std::nullopt;
2666
2667 // If the true case is live, we need to track its region.
2670 // If a throw expression we emit it and return an undefined lvalue
2671 // because it can't be used.
2672 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
2673 cgf.emitCXXThrowExpr(throwExpr);
2674 // Return an undefined lvalue - the throw terminates execution
2675 // so this value will never actually be used
2676 mlir::Type elemTy = cgf.convertType(dead->getType());
2677 mlir::Value undefPtr =
2678 cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
2679 cgf.getLoc(throwExpr->getSourceRange()));
2680 return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
2681 dead->getType());
2682 }
2683 return cgf.emitLValue(live);
2684}
2685
2686/// Emit the operand of a glvalue conditional operator. This is either a glvalue
2687/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
2688/// LValue is returned and the current block has been terminated.
2689static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
2690 const Expr *operand) {
2691 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
2692 cgf.emitCXXThrowExpr(throwExpr);
2693 return std::nullopt;
2694 }
2695
2696 return cgf.emitLValue(operand);
2697}
2698} // namespace
2699
2700// Create and generate the 3 blocks for a conditional operator.
2701// Leaves the 'current block' in the continuation basic block.
// Each arm is emitted through branchGenFunc inside its own ternary lexical
// scope; arms that terminate via throw (cir.unreachable) have their yields
// patched in afterwards so both regions agree on the yield type.
2702template <typename FuncTy>
2705 const FuncTy &branchGenFunc) {
2706 ConditionalInfo info;
2707 ConditionalEvaluation eval(*this);
2708 mlir::Location loc = getLoc(e->getSourceRange());
2709 CIRGenBuilderTy &builder = getBuilder();
2710
2711 mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
2713 mlir::Type yieldTy{};
2714
2715 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
2716 const Expr *expr, std::optional<LValue> &resultLV) {
2717 CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
2718 curLexScope->setAsTernary();
2719
2721 eval.beginEvaluation();
2722 resultLV = branchGenFunc(*this, expr);
2723 mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
2724 eval.endEvaluation();
2725
2726 if (resultPtr) {
2727 yieldTy = resultPtr.getType();
2728 cir::YieldOp::create(b, loc, resultPtr);
2729 } else {
2730 // If LHS or RHS is a void expression we need
2731 // to patch arms as to properly match yield types.
2732 // If the current block's terminator is an UnreachableOp (from a throw),
2733 // we don't need a yield
2734 if (builder.getInsertionBlock()->mightHaveTerminator()) {
2735 mlir::Operation *terminator =
2736 builder.getInsertionBlock()->getTerminator();
2737 if (isa_and_nonnull<cir::UnreachableOp>(terminator))
2738 insertPoints.push_back(b.saveInsertionPoint());
2739 }
2740 }
2741 };
2742
2743 info.result = cir::TernaryOp::create(
2744 builder, loc, condV,
2745 /*trueBuilder=*/
2746 [&](mlir::OpBuilder &b, mlir::Location loc) {
2747 emitBranch(b, loc, e->getTrueExpr(), info.lhs);
2748 },
2749 /*falseBuilder=*/
2750 [&](mlir::OpBuilder &b, mlir::Location loc) {
2751 emitBranch(b, loc, e->getFalseExpr(), info.rhs);
2752 })
2753 .getResult();
2754
2755 // If both arms are void, so be it.
2756 if (!yieldTy)
2757 yieldTy = voidTy;
2758
2759 // Insert required yields.
2760 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2761 mlir::OpBuilder::InsertionGuard guard(builder);
2762 builder.restoreInsertionPoint(toInsert);
2763
2764 // Block does not return: build empty yield.
// NOTE(review): yieldTy was defaulted to voidTy above, so this branch
// looks unreachable — confirm whether the empty-yield path is intended.
2765 if (!yieldTy) {
2766 cir::YieldOp::create(builder, loc);
2767 } else { // Block returns: set null yield value.
2768 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2769 cir::YieldOp::create(builder, loc, op0);
2770 }
2771 }
2772
2773 return info;
2774}
2775
// Emit a conditional operator as an lvalue. Non-glvalue conditionals must
// be aggregates and go through aggregate emission; constant-foldable
// conditions take the simple single-arm path; otherwise both arms are
// emitted as ternary regions and the resulting addresses merged.
2778 if (!expr->isGLValue()) {
2779 // ?: here should be an aggregate.
2780 assert(hasAggregateEvaluationKind(expr->getType()) &&
2781 "Unexpected conditional operator!");
2782 return emitAggExprToLValue(expr);
2783 }
2784
2785 OpaqueValueMapping binding(*this, expr);
2786 if (std::optional<LValue> res =
2787 handleConditionalOperatorLValueSimpleCase(*this, expr))
2788 return *res;
2789
2790 ConditionalInfo info =
2791 emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
2792 return emitLValueOrThrowExpression(cgf, e);
2793 });
2794
2795 if ((info.lhs && !info.lhs->isSimple()) ||
2796 (info.rhs && !info.rhs->isSimple())) {
2797 cgm.errorNYI(expr->getSourceRange(),
2798 "unsupported conditional operator with non-simple lvalue");
2799 return LValue();
2800 }
2801
2802 if (info.lhs && info.rhs) {
// Both arms produced addresses: take the conservative (smaller) alignment
// and the stronger (larger) alignment source of the two.
2803 Address lhsAddr = info.lhs->getAddress();
2804 Address rhsAddr = info.rhs->getAddress();
2805 Address result(info.result, lhsAddr.getElementType(),
2806 std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
2807 AlignmentSource alignSource =
2808 std::max(info.lhs->getBaseInfo().getAlignmentSource(),
2809 info.rhs->getBaseInfo().getAlignmentSource());
2811 return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
2812 }
2813
// One arm was a throw-expression; return the surviving arm's lvalue.
2814 assert((info.lhs || info.rhs) &&
2815 "both operands of glvalue conditional are throw-expressions?");
2816 return info.lhs ? *info.lhs : *info.rhs;
2817}
2818
2819/// An LValue is a candidate for having its loads and stores be made atomic if
2820/// we are operating under /volatile:ms *and* the LValue itself is volatile and
2821/// performing such an operation can be performed without a libcall.
// /volatile:ms handling is still NYI, so this currently always answers
// false (an error is reported when MSVolatile is actually enabled).
2823 if (!cgm.getLangOpts().MSVolatile)
2824 return false;
2825
2826 cgm.errorNYI("LValueSuitableForInlineAtomic LangOpts MSVolatile")
2827 return false;
2828}
2829
#define V(N, I)
Provides definitions for the various language-specific address spaces.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e, GlobalDecl gd)
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static cir::IntAttr getConstantIndexOrNull(mlir::Value idx)
static const Expr * getSimpleArrayDecayOperand(const Expr *e)
If the specified expr is a simple decay from an array to pointer, return the array subexpression.
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd)
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e, const VarDecl *vd)
static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf, mlir::Location beginLoc, mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd, mlir::Value thisValue)
static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd)
Defines the clang::Expr interface and subclasses for C++ expressions.
__device__ __2f16 b
__device__ __2f16 float c
cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr)
static OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block)
cir::GetMemberOp createGetMember(mlir::Location loc, mlir::Type resultTy, mlir::Value base, llvm::StringRef name, unsigned index)
cir::PointerType getPointerTo(mlir::Type ty)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
mlir::Value createGetGlobal(mlir::Location loc, cir::GlobalOp global, bool threadLocal=false)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType BoolTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
CanQualType getCanonicalTagType(const TagDecl *TD) const
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2779
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2753
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:2767
SourceLocation getEndLoc() const
Definition Expr.h:2770
QualType getElementType() const
Definition TypeBase.h:3784
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
Expr * getRHS() const
Definition Expr.h:4093
Opcode getOpcode() const
Definition Expr.h:4086
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
static Address invalid()
Definition Address.h:74
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:136
mlir::Type getType() const
Definition Address.h:115
bool isValid() const
Definition Address.h:75
mlir::Operation * getDefiningOp() const
Get the operation which defines this address.
Definition Address.h:139
An aggregate value slot.
IsDestructed_t
This is set to true if the slot might be aliased and it's not undefined behavior to access it through...
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
Address createElementBitCast(mlir::Location loc, Address addr, mlir::Type destType)
Cast the element type of the given address to a different type, preserving information like the align...
mlir::Value getArrayElement(mlir::Location arrayLocBegin, mlir::Location arrayLocEnd, mlir::Value arrayPtr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
Create a cir.ptr_stride operation to get access to an array element.
Abstract information about a function or function prototype.
Definition CIRGenCall.h:27
bool isPseudoDestructor() const
Definition CIRGenCall.h:123
void setFunctionPointer(mlir::Operation *functionPtr)
Definition CIRGenCall.h:185
const clang::FunctionDecl * getBuiltinDecl() const
Definition CIRGenCall.h:99
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CIRGenCall.h:127
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
unsigned getBuiltinID() const
Definition CIRGenCall.h:103
static CIRGenCallee forBuiltin(unsigned builtinID, const clang::FunctionDecl *builtinDecl)
Definition CIRGenCall.h:108
mlir::Operation * getFunctionPointer() const
Definition CIRGenCall.h:147
static CIRGenCallee forPseudoDestructor(const clang::CXXPseudoDestructorExpr *expr)
Definition CIRGenCall.h:117
An object to manage conditionally-evaluated expressions.
static ConstantEmission forValue(mlir::TypedAttr c)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-vlaue withi the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitMemberExpr(const MemberExpr *e)
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
LValue emitLValueForLambdaField(const FieldDecl *field)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
mlir::Operation * curFn
The current function or global initializer that is generated code for.
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::Type convertTypeForMem(QualType t)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
static bool hasAggregateEvaluationKind(clang::QualType type)
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, excpet that if the Field is a reference, this will return the address of the...
LValue emitCallExprLValue(const clang::CallExpr *e)
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
mlir::MLIRContext & getMLIRContext()
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
void emitCXXThrowExpr(const CXXThrowExpr *e)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates a alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignmen and cast it to the defa...
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
CIRGenBuilderTy & getBuilder()
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
mlir::Operation * getAddrOfGlobalTemporary(const MaterializeTemporaryExpr *mte, const Expr *init)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
mlir::Value getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty={}, ForDefinition_t isForDefinition=NotForDefinition)
Return the mlir::Value for the address of the given global variable.
cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant)
This class handles record and union layout info while lowering AST types to CIR types.
cir::RecordType getCIRType() const
Return the "complete object" LLVM type associated with this record.
const CIRGenBitFieldInfo & getBitFieldInfo(const clang::FieldDecl *fd) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getCIRFieldNo(const clang::FieldDecl *fd) const
Return cir::RecordType element number that corresponds to the field FD.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
mlir::Type convertTypeForMem(clang::QualType, bool forBitField=false)
Convert type T into an mlir::Type.
mlir::Attribute emitAbstract(const Expr *e, QualType destType)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
AlignmentSource getAlignmentSource() const
void mergeForCast(const LValueBaseInfo &info)
bool isExtVectorElt() const
const clang::Qualifiers & getQuals() const
mlir::Value getExtVectorPointer() const
static LValue makeExtVectorElt(Address vecAddress, mlir::ArrayAttr elts, clang::QualType type, LValueBaseInfo baseInfo)
mlir::Value getVectorIdx() const
bool isVectorElt() const
Address getAddress() const
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
mlir::ArrayAttr getExtVectorElts() const
static LValue makeVectorElt(Address vecAddress, mlir::Value index, clang::QualType t, LValueBaseInfo baseInfo)
RValue asAggregateRValue() const
unsigned getVRQualifiers() const
clang::QualType getType() const
static LValue makeBitfield(Address addr, const CIRGenBitFieldInfo &info, clang::QualType type, LValueBaseInfo baseInfo)
Create a new object to represent a bit-field access.
mlir::Value getPointer() const
bool isVolatileQualified() const
bool isBitField() const
Address getVectorAddress() const
clang::CharUnits getAlignment() const
LValueBaseInfo getBaseInfo() const
bool isVolatile() const
const CIRGenBitFieldInfo & getBitFieldInfo() const
Address getBitFieldAddress() const
Address getExtVectorAddress() const
bool isSimple() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a C++ destructor within a class.
Definition DeclCXX.h:2876
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:180
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2136
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1372
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:849
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
arg_range arguments()
Definition Expr.h:3198
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3766
bool changesVolatileQualification() const
Return.
Definition Expr.h:3813
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1951
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3325
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
bool isFileScope() const
Definition Expr.h:3640
const Expr * getInitializer() const
Definition Expr.h:3636
ConditionalOperator - The ?
Definition Expr.h:4394
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1477
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:488
ValueDecl * getDecl()
Definition Expr.h:1341
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1471
SourceLocation getLocation() const
Definition Expr.h:1349
SourceLocation getLocation() const
Definition DeclBase.h:439
DeclContext * getDeclContext()
Definition DeclBase.h:448
bool hasAttr() const
Definition DeclBase.h:577
const Expr * getBase() const
Definition Expr.h:6580
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:84
bool isGLValue() const
Definition Expr.h:287
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:447
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1546
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6609
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4436
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4549
Represents a member of a struct/union/class.
Definition Decl.h:3175
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3278
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4827
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3260
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3411
bool isZeroSize(const ASTContext &Ctx) const
Determine if this field is a subobject of zero size, that is, either a zero-length bit-field or a fie...
Definition Decl.cpp:4767
Represents a function declaration or definition.
Definition Decl.h:2015
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5357
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4921
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4946
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4938
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4971
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3450
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3591
Expr * getBase() const
Definition Expr.h:3444
bool isArrow() const
Definition Expr.h:3551
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3562
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3703
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
bool isUnique() const
Definition Expr.h:1239
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2008
StringRef getIdentKindName() const
Definition Expr.h:2065
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2043
StringLiteral * getFunctionName()
Definition Expr.h:2052
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8557
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8471
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1444
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1185
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1551
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
Represents a struct/union/class.
Definition Decl.h:4342
Encodes a location in the source.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
bool isUnion() const
Definition Decl.h:3943
Exposes information about the current target.
Definition TargetInfo.h:227
virtual StringRef getABI() const
Get the ABI currently in use.
bool isVoidType() const
Definition TypeBase.h:9034
bool isBooleanType() const
Definition TypeBase.h:9171
bool isPackedVectorBoolType(const ASTContext &ctx) const
Definition Type.cpp:420
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9337
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8767
bool isFunctionPointerType() const
Definition TypeBase.h:8735
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2375
bool isConstantMatrixType() const
Definition TypeBase.h:8835
bool isPointerType() const
Definition TypeBase.h:8668
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9328
bool isReferenceType() const
Definition TypeBase.h:8692
bool isVariableArrayType() const
Definition TypeBase.h:8779
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
bool isExtVectorBoolType() const
Definition TypeBase.h:8815
bool isAnyComplexType() const
Definition TypeBase.h:8803
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9214
bool isAtomicType() const
Definition TypeBase.h:8860
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2850
bool isFunctionType() const
Definition TypeBase.h:8664
bool isVectorType() const
Definition TypeBase.h:8807
bool isSubscriptableVectorType() const
Definition TypeBase.h:8827
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9261
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2397
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
SourceLocation getExprLoc() const
Definition Expr.h:2371
Expr * getSubExpr() const
Definition Expr.h:2288
Opcode getOpcode() const
Definition Expr.h:2283
static bool isPrefix(Opcode Op)
isPrefix - Return true if this is a prefix operation, like –x.
Definition Expr.h:2322
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
TLSKind getTLSKind() const
Definition Decl.cpp:2180
bool hasInit() const
Definition Decl.cpp:2410
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2378
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1184
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:952
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4016
Represents a GCC generic vector type.
Definition TypeBase.h:4225
Defines the clang::TargetInfo interface.
mlir::ptr::MemorySpaceAttrInterface toCIRAddressSpaceAttr(mlir::MLIRContext &ctx, clang::LangAS langAS)
Convert an AST LangAS to the appropriate CIR address space attribute interface.
OverflowBehavior
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static AlignmentSource getFieldAlignmentSource(AlignmentSource source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, CUDAKernelCallExpr > cudaKernelCallExpr
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
bool isTargetAddressSpace(LangAS AS)
@ SC_Register
Definition Specifiers.h:257
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:342
@ SD_Static
Static storage duration.
Definition Specifiers.h:343
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:340
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:341
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:344
LangAS
Defines the address space values used by the address space qualifier of QualType.
U cast(CodeGen::Address addr)
Definition Address.h:327
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:180
static bool weakRefReference()
static bool objCLifetime()
static bool emitLifetimeMarkers()
static bool opLoadEmitScalarRangeCheck()
static bool addressSpace()
static bool opAllocaNonGC()
static bool opAllocaOpenMPThreadPrivate()
static bool preservedAccessIndexRegion()
static bool mergeAllConstants()
static bool opLoadStoreTbaa()
static bool opCallChain()
static bool opAllocaImpreciseLifetime()
static bool opAllocaStaticLocal()
static bool opAllocaTLS()
static bool emitCheckedInBoundsGEP()
static bool attributeNoBuiltin()
static bool setObjCGCLValueClass()
static bool cirgenABIInfo()
static bool opLoadStoreObjC()
static bool opCallArgEvaluationOrder()
static bool lambdaCaptures()
static bool insertBuiltinUnpredictable()
static bool opCallMustTail()
static bool shouldReverseUnaryCondOnBoolExpr()
static bool tryEmitAsConstant()
static bool addressIsKnownNonNull()
static bool astVarDeclInterface()
static bool cgCapturedStmtInfo()
static bool opAllocaEscapeByReference()
static bool opLoadStoreNontemporal()
static bool opCallFnInfoOpts()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Record with information about how a bitfield should be accessed.
unsigned volatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned volatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
mlir::ptr::MemorySpaceAttrInterface getCIRAllocaAddressSpace() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:615