clang 23.0.0git
CIRGenExpr.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "Address.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "TargetInfo.h"
19#include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h"
20#include "mlir/IR/BuiltinAttributes.h"
21#include "mlir/IR/Value.h"
22#include "clang/AST/Attr.h"
23#include "clang/AST/CharUnits.h"
24#include "clang/AST/Decl.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/ExprCXX.h"
33#include <optional>
34
35using namespace clang;
36using namespace clang::CIRGen;
37using namespace cir;
38
39/// Get the address of a zero-sized field within a record. Zero-sized fields
40/// (e.g. empty bases with [[no_unique_address]]) don't appear in the CIR
41/// record layout, so we compute their address using the ASTContext field
42/// offset and byte-level pointer arithmetic instead of cir.get_member.
44 const FieldDecl *field) {
45 CIRGenBuilderTy &builder = cgf.getBuilder();
47 cgf.getContext().getFieldOffset(field));
48 mlir::Type fieldType = cgf.convertType(field->getType());
49
50 if (offset.isZero()) {
51 return Address(builder.createPtrBitcast(base.getPointer(), fieldType),
52 base.getAlignment());
53 }
54
55 // Cast to byte pointer, stride by the field offset, then cast to the
56 // field pointer type (CIR pointers are typed, so we need explicit casts
57 // unlike OG's opaque-pointer GEP).
58 mlir::Location loc = cgf.getLoc(field->getLocation());
59 mlir::Value addr =
60 builder.createPtrBitcast(base.getPointer(), builder.getUInt8Ty());
61 addr = builder.createPtrStride(loc, addr,
62 builder.getUInt64(offset.getQuantity(), loc));
63 addr = builder.createPtrBitcast(addr, fieldType);
64 return Address(addr, base.getAlignment().alignmentAtOffset(offset));
65}
66
68 const FieldDecl *field,
69 llvm::StringRef fieldName,
70 unsigned fieldIndex) {
72 return emitAddrOfZeroSizeField(*this, base, field);
73
74 mlir::Location loc = getLoc(field->getLocation());
75
76 // Retrieve layout information for both type resolution and alignment.
77 const RecordDecl *rec = field->getParent();
78 const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
79 unsigned idx = layout.getCIRFieldNo(field);
80
81 // For potentially-overlapping fields (e.g. [[no_unique_address]]), the
82 // record stores the base subobject type (without tail padding) rather than
83 // the complete object type. Use the record's member type for get_member,
84 // then bitcast to the complete type for downstream use.
85 //
86 // For unions, all fields map to index 0, so we use the field's declared type
87 // directly instead of looking up the member type from the layout.
88 mlir::Type fieldType = convertType(field->getType());
89 auto fieldPtr = cir::PointerType::get(fieldType);
90 bool needsBitcast = false;
91
92 if (!rec->isUnion() && field->isPotentiallyOverlapping()) {
93 mlir::Type memberType = layout.getCIRType().getMembers()[idx];
94 fieldPtr = cir::PointerType::get(memberType);
95 needsBitcast = true;
96 }
97
98 // For most cases fieldName is the same as field->getName() but for lambdas,
99 // which do not currently carry the name, so it can be passed down from the
100 // CaptureStmt.
101 mlir::Value addr = builder.createGetMember(loc, fieldPtr, base.getPointer(),
102 fieldName, fieldIndex);
103
104 // If the field is potentially overlapping, the record member uses the base
105 // subobject type. Cast to the complete object pointer type expected by
106 // callers (analogous to OG's opaque pointer behavior).
107 if (needsBitcast)
108 addr = builder.createPtrBitcast(addr, fieldType);
109
111 layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
112 return Address(addr, base.getAlignment().alignmentAtOffset(offset));
113}
114
115/// Given an expression of pointer type, try to
116/// derive a more accurate bound on the alignment of the pointer.
118 LValueBaseInfo *baseInfo) {
119 // We allow this with ObjC object pointers because of fragile ABIs.
120 assert(expr->getType()->isPointerType() ||
121 expr->getType()->isObjCObjectPointerType());
122 expr = expr->IgnoreParens();
123
124 // Casts:
125 if (auto const *ce = dyn_cast<CastExpr>(expr)) {
126 if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
127 cgm.emitExplicitCastExprType(ece);
128
129 switch (ce->getCastKind()) {
130 // Non-converting casts (but not C's implicit conversion from void*).
131 case CK_BitCast:
132 case CK_NoOp:
133 case CK_AddressSpaceConversion: {
134 if (const auto *ptrTy =
135 ce->getSubExpr()->getType()->getAs<PointerType>()) {
136 if (ptrTy->getPointeeType()->isVoidType())
137 break;
138
139 LValueBaseInfo innerBaseInfo;
141 Address addr =
142 emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
143 if (baseInfo)
144 *baseInfo = innerBaseInfo;
145
146 if (isa<ExplicitCastExpr>(ce)) {
147 LValueBaseInfo targetTypeBaseInfo;
148
149 const QualType pointeeType = expr->getType()->getPointeeType();
150 const CharUnits align =
151 cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);
152
153 // If the source l-value is opaque, honor the alignment of the
154 // casted-to type.
155 if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
156 if (baseInfo)
157 baseInfo->mergeForCast(targetTypeBaseInfo);
158 addr = Address(addr.getPointer(), addr.getElementType(), align);
159 }
160 }
161
163
164 const mlir::Type eltTy =
165 convertTypeForMem(expr->getType()->getPointeeType());
166 addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
167 addr, eltTy);
169
170 return addr;
171 }
172 break;
173 }
174
175 // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
176 case CK_ArrayToPointerDecay:
177 return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);
178
179 case CK_UncheckedDerivedToBase:
180 case CK_DerivedToBase: {
183 Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
184 const CXXRecordDecl *derived =
185 ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
186 return getAddressOfBaseClass(addr, derived, ce->path(),
188 ce->getExprLoc());
189 }
190
191 case CK_AnyPointerToBlockPointerCast:
192 case CK_BaseToDerived:
193 case CK_BaseToDerivedMemberPointer:
194 case CK_BlockPointerToObjCPointerCast:
195 case CK_BuiltinFnToFnPtr:
196 case CK_CPointerToObjCPointerCast:
197 case CK_DerivedToBaseMemberPointer:
198 case CK_Dynamic:
199 case CK_FunctionToPointerDecay:
200 case CK_IntegralToPointer:
201 case CK_LValueToRValue:
202 case CK_LValueToRValueBitCast:
203 case CK_NullToMemberPointer:
204 case CK_NullToPointer:
205 case CK_ReinterpretMemberPointer:
206 case CK_UserDefinedConversion:
207 // Common pointer conversions, nothing to do here.
208 // TODO: Is there any reason to treat base-to-derived conversions
209 // specially?
210 break;
211
212 case CK_ARCConsumeObject:
213 case CK_ARCExtendBlockObject:
214 case CK_ARCProduceObject:
215 case CK_ARCReclaimReturnedObject:
216 case CK_AtomicToNonAtomic:
217 case CK_BooleanToSignedIntegral:
218 case CK_ConstructorConversion:
219 case CK_CopyAndAutoreleaseBlockObject:
220 case CK_Dependent:
221 case CK_FixedPointCast:
222 case CK_FixedPointToBoolean:
223 case CK_FixedPointToFloating:
224 case CK_FixedPointToIntegral:
225 case CK_FloatingCast:
226 case CK_FloatingComplexCast:
227 case CK_FloatingComplexToBoolean:
228 case CK_FloatingComplexToIntegralComplex:
229 case CK_FloatingComplexToReal:
230 case CK_FloatingRealToComplex:
231 case CK_FloatingToBoolean:
232 case CK_FloatingToFixedPoint:
233 case CK_FloatingToIntegral:
234 case CK_HLSLAggregateSplatCast:
235 case CK_HLSLArrayRValue:
236 case CK_HLSLElementwiseCast:
237 case CK_HLSLVectorTruncation:
238 case CK_HLSLMatrixTruncation:
239 case CK_IntToOCLSampler:
240 case CK_IntegralCast:
241 case CK_IntegralComplexCast:
242 case CK_IntegralComplexToBoolean:
243 case CK_IntegralComplexToFloatingComplex:
244 case CK_IntegralComplexToReal:
245 case CK_IntegralRealToComplex:
246 case CK_IntegralToBoolean:
247 case CK_IntegralToFixedPoint:
248 case CK_IntegralToFloating:
249 case CK_LValueBitCast:
250 case CK_MatrixCast:
251 case CK_MemberPointerToBoolean:
252 case CK_NonAtomicToAtomic:
253 case CK_ObjCObjectLValueCast:
254 case CK_PointerToBoolean:
255 case CK_PointerToIntegral:
256 case CK_ToUnion:
257 case CK_ToVoid:
258 case CK_VectorSplat:
259 case CK_ZeroToOCLOpaqueType:
260 // Classic codegen has a default that does nothing. In CIR, we are issuing
261 // a diagnostic so we can examine casts that are reached here to be sure
262 // no action is needed. If nothing is needed, the cast can be moved to the
263 // group above that does nothing.
264 cgm.errorNYI(ce->getSourceRange(),
265 "unexpected cast for emitPointerWithAlignment: ",
266 ce->getCastKindName());
267 break;
268 }
269 }
270
271 // Unary &
272 if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
273 // TODO(cir): maybe we should use a CIR unary op for pointers here instead.
274 if (uo->getOpcode() == UO_AddrOf) {
275 LValue lv = emitLValue(uo->getSubExpr());
276 if (baseInfo)
277 *baseInfo = lv.getBaseInfo();
279 return lv.getAddress();
280 }
281 }
282
283 // std::addressof and variants.
284 if (auto const *call = dyn_cast<CallExpr>(expr)) {
285 switch (call->getBuiltinCallee()) {
286 default:
287 break;
288 case Builtin::BIaddressof:
289 case Builtin::BI__addressof:
290 case Builtin::BI__builtin_addressof: {
291 LValue lv = emitLValue(call->getArg(0));
292 if (baseInfo)
293 *baseInfo = lv.getBaseInfo();
295 return lv.getAddress();
296 }
297 }
298 }
299
300 // Otherwise, use the alignment of the type.
302 emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
303 /*forPointeeType=*/true, baseInfo);
304}
305
307 LValue dst) {
308 auto getScalarSizeInBits = [&](mlir::Type ty) -> unsigned {
309 mlir::Type scalarTy = mlir::isa<cir::VectorType>(ty)
310 ? mlir::cast<cir::VectorType>(ty).getElementType()
311 : ty;
312 cir::CIRDataLayout dl = cgm.getDataLayout();
313 return dl.getTypeSizeInBits(scalarTy).getFixedValue();
314 };
315
316 mlir::Value srcVal = src.getValue();
317 Address dstAddr = dst.getExtVectorAddress();
318 if (getScalarSizeInBits(dstAddr.getElementType()) >
319 getScalarSizeInBits(srcVal.getType())) {
320 cgm.errorNYI(
321 dst.getPointer().getLoc(),
322 "emitStoreThroughExtVectorComponentLValue: dstTySize > srcTysize");
323 return;
324 }
325
326 if (getLangOpts().HLSL) {
327 cgm.errorNYI(dst.getPointer().getLoc(),
328 "emitStoreThroughExtVectorComponentLValue: HLSL");
329 return;
330 }
331
332 // This access turns into a read/modify/write of the vector. Load the input
333 // value now.
334 mlir::Location loc = dst.getExtVectorPointer().getLoc();
335
336 mlir::ArrayAttr elts = dst.getExtVectorElts();
337
338 mlir::Value vec = builder.createLoad(loc, dstAddr, dst.isVolatile());
339 if (const auto *vecTy = dst.getType()->getAs<clang::VectorType>()) {
340 unsigned numSrcElts = vecTy->getNumElements();
341 unsigned numDstElts = cast<cir::VectorType>(vec.getType()).getSize();
342 if (numDstElts == numSrcElts) {
343 // Use shuffle vector is the src and destination are the same number of
344 // elements and restore the vector mask since it is on the side it will be
345 // stored.
346 SmallVector<int64_t> mask(numDstElts);
347 for (unsigned i = 0; i != numDstElts; ++i)
348 mask[getAccessedFieldNo(i, elts)] = i;
349
350 vec = builder.createVecShuffle(loc, srcVal, mask);
351 } else if (numDstElts > numSrcElts) {
352 // Extended the source vector to the same length and then shuffle it
353 // into the destination.
354 // FIXME: since we're shuffling with undef, can we just use the indices
355 // into that? This could be simpler.
356 SmallVector<int64_t> extMask(numDstElts, -1);
357 std::iota(extMask.begin(), extMask.begin() + numSrcElts, 0);
358
359 mlir::Value extSrcVal = builder.createVecShuffle(loc, srcVal, extMask);
360
361 // build identity
362 SmallVector<int64_t> mask(numDstElts);
363 std::iota(mask.begin(), mask.begin() + numDstElts, 0);
364
365 // When the vector size is odd and .odd or .hi is used, the last element
366 // of the Elts constant array will be one past the size of the vector.
367 // Ignore the last element here, if it is greater than the mask size.
368 if ((unsigned)getAccessedFieldNo(numSrcElts - 1, elts) == mask.size())
369 numSrcElts--;
370
371 // modify when what gets shuffled in
372 for (unsigned i = 0; i != numSrcElts; ++i)
373 mask[getAccessedFieldNo(i, elts)] = i + numDstElts;
374
375 vec = builder.createVecShuffle(loc, vec, extSrcVal, mask);
376 } else {
377 // We should never shorten the vector
378 llvm_unreachable("unexpected shorten vector length");
379 }
380 } else {
381 // If the Src is a scalar (not a vector), and the target is a vector it
382 // must be updating one element.
383 unsigned inIdx = getAccessedFieldNo(0, elts);
384 cir::ConstantOp elt = builder.getSInt64(inIdx, loc);
385 vec = cir::VecInsertOp::create(builder, loc, vec, srcVal, elt);
386 }
387
388 builder.createStore(loc, vec, dst.getExtVectorAddress(),
389 dst.isVolatileQualified());
390}
391
393 bool isInit) {
394 if (!dst.isSimple()) {
395 if (dst.isVectorElt()) {
396 // Read/modify/write the vector, inserting the new element
397 const mlir::Location loc = dst.getVectorPointer().getLoc();
398 const mlir::Value vector =
399 builder.createLoad(loc, dst.getVectorAddress());
400 const mlir::Value newVector = cir::VecInsertOp::create(
401 builder, loc, vector, src.getValue(), dst.getVectorIdx());
402 builder.createStore(loc, newVector, dst.getVectorAddress());
403 return;
404 }
405
406 if (dst.isExtVectorElt())
408
409 assert(dst.isBitField() && "Unknown LValue type");
411 return;
412
413 cgm.errorNYI(dst.getPointer().getLoc(),
414 "emitStoreThroughLValue: non-simple lvalue");
415 return;
416 }
417
419
420 assert(src.isScalar() && "Can't emit an aggregate store with this method");
421 emitStoreOfScalar(src.getValue(), dst, isInit);
422}
423
424static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
425 const VarDecl *vd) {
426 QualType t = e->getType();
427
428 // If it's thread_local, emit a call to its wrapper function instead.
429 if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
430 cgf.cgm.errorNYI(e->getSourceRange(),
431 "emitGlobalVarDeclLValue: thread_local variable");
432
433 // Check if the variable is marked as declare target with link clause in
434 // device codegen.
435 if (cgf.getLangOpts().OpenMP)
436 cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");
437
438 // Traditional LLVM codegen handles thread local separately, CIR handles
439 // as part of getAddrOfGlobalVar.
440 mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);
441
442 mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
443 cir::PointerType realPtrTy = cir::PointerType::get(
444 realVarTy, mlir::cast<cir::PointerType>(v.getType()).getAddrSpace());
445 if (realPtrTy != v.getType())
446 v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);
447
448 CharUnits alignment = cgf.getContext().getDeclAlign(vd);
449 Address addr(v, realVarTy, alignment);
450 LValue lv;
451 if (vd->getType()->isReferenceType())
452 lv = cgf.emitLoadOfReferenceLValue(addr, cgf.getLoc(e->getSourceRange()),
454 else
455 lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
457 return lv;
458}
459
460void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
461 bool isVolatile, QualType ty,
462 LValueBaseInfo baseInfo, bool isInit,
463 bool isNontemporal) {
464
465 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
466 // Boolean vectors use `iN` as storage type.
467 if (clangVecTy->isExtVectorBoolType())
468 cgm.errorNYI(addr.getPointer().getLoc(),
469 "emitStoreOfScalar ExtVectorBoolType");
470
471 // Handle vectors of size 3 like size 4 for better performance.
472 const mlir::Type elementType = addr.getElementType();
473 const auto vecTy = cast<cir::VectorType>(elementType);
474
475 // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it upstreamed
477 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
478 cgm.errorNYI(addr.getPointer().getLoc(),
479 "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
480 }
481
482 value = emitToMemory(value, ty);
483
485 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
486 if (ty->isAtomicType() ||
487 (!isInit && isLValueSuitableForInlineAtomic(atomicLValue))) {
488 emitAtomicStore(RValue::get(value), atomicLValue, isInit);
489 return;
490 }
491
492 // Update the alloca with more info on initialization.
493 assert(addr.getPointer() && "expected pointer to exist");
494 auto srcAlloca = addr.getDefiningOp<cir::AllocaOp>();
495 if (currVarDecl && srcAlloca) {
496 const VarDecl *vd = currVarDecl;
497 assert(vd && "VarDecl expected");
498 if (vd->hasInit())
499 srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
500 }
501
502 assert(currSrcLoc && "must pass in source location");
503 builder.createStore(*currSrcLoc, value, addr, isVolatile);
504
505 if (isNontemporal) {
506 cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
507 return;
508 }
509
511}
512
513// TODO: Replace this with a proper TargetInfo function call.
514/// Helper method to check if the underlying ABI is AAPCS
515static bool isAAPCS(const TargetInfo &targetInfo) {
516 return targetInfo.getABI().starts_with("aapcs");
517}
518
520 LValue dst) {
521
522 const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
523 mlir::Type resLTy = convertTypeForMem(dst.getType());
524 Address ptr = dst.getBitFieldAddress();
525
526 bool useVoaltile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
527 dst.isVolatileQualified() &&
528 info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());
529
530 assert(currSrcLoc && "must pass in source location");
531
532 return builder.createSetBitfield(*currSrcLoc, resLTy, ptr,
533 ptr.getElementType(), src.getValue(), info,
534 dst.isVolatileQualified(), useVoaltile);
535}
536
538 const CIRGenBitFieldInfo &info = lv.getBitFieldInfo();
539
540 // Get the output type.
541 mlir::Type resLTy = convertType(lv.getType());
542 Address ptr = lv.getBitFieldAddress();
543
544 bool useVoaltile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
545 isAAPCS(cgm.getTarget());
546
547 mlir::Value field =
548 builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
549 info, lv.isVolatile(), useVoaltile);
551 return RValue::get(field);
552}
553
// Computes the address of the storage unit that holds a bit-field, via a
// cir.get_member on the base record.
// NOTE(review): this doxygen extraction dropped the opening signature line
// (554), the declaration of `rec` used below (560), and the head of the
// `offset` statement (564, presumably `CharUnits offset =
// CharUnits::fromQuantity(`) — recover them from the original file before
// compiling.
 555                                                 const FieldDecl *field,
 556                                                 mlir::Type fieldType,
 557                                                 unsigned index) {
 558  mlir::Location loc = getLoc(field->getLocation());
 559  cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
 561  cir::GetMemberOp sea = getBuilder().createGetMember(
 562      loc, fieldPtr, base.getPointer(), field->getName(),
// Union members all live at index 0, so use the declared field index there.
 563      rec.isUnion() ? field->getFieldIndex() : index);
 565      rec.getElementOffset(cgm.getDataLayout().layout, index));
// Alignment of the storage unit is the base alignment at the member offset.
 566  return Address(sea, base.getAlignment().alignmentAtOffset(offset));
 567}
568
570 const FieldDecl *field) {
571 LValueBaseInfo baseInfo = base.getBaseInfo();
572 const CIRGenRecordLayout &layout =
573 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
574 const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
575
577
578 unsigned idx = layout.getCIRFieldNo(field);
579 Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);
580
581 mlir::Location loc = getLoc(field->getLocation());
582 if (addr.getElementType() != info.storageType)
583 addr = builder.createElementBitCast(loc, addr, info.storageType);
584
585 QualType fieldType =
587 // TODO(cir): Support TBAA for bit fields.
589 LValueBaseInfo fieldBaseInfo(baseInfo.getAlignmentSource());
590 return LValue::makeBitfield(addr, info, fieldType, fieldBaseInfo);
591}
592
594 LValueBaseInfo baseInfo = base.getBaseInfo();
595
596 if (field->isBitField())
597 return emitLValueForBitField(base, field);
598
599 QualType fieldType = field->getType();
600 const RecordDecl *rec = field->getParent();
601 AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
602 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));
604
605 Address addr = base.getAddress();
606 if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
607 if (cgm.getCodeGenOpts().StrictVTablePointers &&
608 classDecl->isDynamicClass()) {
609 cgm.errorNYI(field->getSourceRange(),
610 "emitLValueForField: strict vtable for dynamic class");
611 }
612 }
613
614 unsigned recordCVR = base.getVRQualifiers();
615
616 llvm::StringRef fieldName = field->getName();
617 unsigned fieldIndex;
618 if (cgm.lambdaFieldToName.count(field))
619 fieldName = cgm.lambdaFieldToName[field];
620
621 // Empty fields don't have entries in the record layout, so handle them
622 // separately. They just use the base address directly with the right type.
623 if (!rec->isUnion() && isEmptyFieldForLayout(getContext(), field)) {
624 addr = emitAddrOfZeroSizeField(*this, addr, field);
625 LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
626 lv.getQuals().addCVRQualifiers(recordCVR);
627 return lv;
628 }
629
630 if (rec->isUnion())
631 fieldIndex = field->getFieldIndex();
632 else {
633 const CIRGenRecordLayout &layout =
634 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
635 fieldIndex = layout.getCIRFieldNo(field);
636 }
637
638 addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);
640
641 // If this is a reference field, load the reference right now.
642 if (fieldType->isReferenceType()) {
644 LValue refLVal = makeAddrLValue(addr, fieldType, fieldBaseInfo);
645 if (recordCVR & Qualifiers::Volatile)
646 refLVal.getQuals().addVolatile();
647 addr = emitLoadOfReference(refLVal, getLoc(field->getSourceRange()),
648 &fieldBaseInfo);
649
650 // Qualifiers on the struct don't apply to the referencee.
651 recordCVR = 0;
652 fieldType = fieldType->getPointeeType();
653 }
654
655 if (field->hasAttr<AnnotateAttr>()) {
656 cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
657 return LValue();
658 }
659
660 LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
661 lv.getQuals().addCVRQualifiers(recordCVR);
662
663 // __weak attribute on a field is ignored.
665 cgm.errorNYI(field->getSourceRange(),
666 "emitLValueForField: __weak attribute");
667 return LValue();
668 }
669
670 return lv;
671}
672
674 LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName) {
675 QualType fieldType = field->getType();
676
677 if (!fieldType->isReferenceType())
678 return emitLValueForField(base, field);
679
680 Address v = base.getAddress();
681 if (isEmptyFieldForLayout(getContext(), field)) {
682 v = emitAddrOfZeroSizeField(*this, v, field);
683 } else {
684 const CIRGenRecordLayout &layout =
685 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
686 unsigned fieldIndex = layout.getCIRFieldNo(field);
687 v = emitAddrOfFieldStorage(v, field, fieldName, fieldIndex);
688 }
689
690 // Make sure that the address is pointing to the right type.
691 mlir::Type memTy = convertTypeForMem(fieldType);
692 v = builder.createElementBitCast(getLoc(field->getSourceRange()), v, memTy);
693
694 // TODO: Generate TBAA information that describes this access as a structure
695 // member access and not just an access to an object of the field's type. This
696 // should be similar to what we do in EmitLValueForField().
697 LValueBaseInfo baseInfo = base.getBaseInfo();
698 AlignmentSource fieldAlignSource = baseInfo.getAlignmentSource();
699 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(fieldAlignSource));
701 return makeAddrLValue(v, fieldType, fieldBaseInfo);
702}
703
704/// Converts a scalar value from its primary IR type (as returned
705/// by ConvertType) to its load/store type.
706mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
707 if (auto *atomicTy = ty->getAs<AtomicType>())
708 ty = atomicTy->getValueType();
709
710 if (ty->isExtVectorBoolType()) {
711 cgm.errorNYI("emitToMemory: extVectorBoolType");
712 }
713
714 // Unlike in classic codegen CIR, bools are kept as `cir.bool` and BitInts are
715 // kept as `cir.int<N>` until further lowering
716
717 return value;
718}
719
720mlir::Value CIRGenFunction::emitFromMemory(mlir::Value value, QualType ty) {
721 if (auto *atomicTy = ty->getAs<AtomicType>())
722 ty = atomicTy->getValueType();
723
725 cgm.errorNYI("emitFromMemory: PackedVectorBoolType");
726 }
727
728 return value;
729}
730
731void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
732 bool isInit) {
733 if (lvalue.getType()->isConstantMatrixType()) {
734 assert(0 && "NYI: emitStoreOfScalar constant matrix type");
735 return;
736 }
737
738 emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
739 lvalue.getType(), lvalue.getBaseInfo(), isInit,
740 /*isNontemporal=*/false);
741}
742
743mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
744 QualType ty, SourceLocation loc,
745 LValueBaseInfo baseInfo) {
746 // Traditional LLVM codegen handles thread local separately, CIR handles
747 // as part of getAddrOfGlobalVar (GetGlobalOp).
748 mlir::Type eltTy = addr.getElementType();
749
750 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
751 if (clangVecTy->isExtVectorBoolType()) {
752 cgm.errorNYI(loc, "emitLoadOfScalar: ExtVectorBoolType");
753 return nullptr;
754 }
755
756 const auto vecTy = cast<cir::VectorType>(eltTy);
757
758 // Handle vectors of size 3 like size 4 for better performance.
760 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
761 cgm.errorNYI(addr.getPointer().getLoc(),
762 "emitLoadOfScalar Vec3 & PreserveVec3Type disabled");
763 }
764
766 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
767 if (ty->isAtomicType() || isLValueSuitableForInlineAtomic(atomicLValue))
768 return emitAtomicLoad(atomicLValue, loc).getValue();
769
770 if (mlir::isa<cir::VoidType>(eltTy))
771 cgm.errorNYI(loc, "emitLoadOfScalar: void type");
772
774
775 mlir::Value loadOp = builder.createLoad(getLoc(loc), addr, isVolatile);
776 if (!ty->isBooleanType() && ty->hasBooleanRepresentation())
777 cgm.errorNYI("emitLoadOfScalar: boolean type with boolean representation");
778
779 return loadOp;
780}
781
783 SourceLocation loc) {
786 return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
787 lvalue.getType(), loc, lvalue.getBaseInfo());
788}
789
790/// Given an expression that represents a value lvalue, this
791/// method emits the address of the lvalue, then loads the result as an rvalue,
792/// returning the rvalue.
794 assert(!lv.getType()->isFunctionType());
795 assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");
796
797 if (lv.isBitField())
798 return emitLoadOfBitfieldLValue(lv, loc);
799
800 if (lv.isSimple())
801 return RValue::get(emitLoadOfScalar(lv, loc));
802
803 if (lv.isVectorElt()) {
804 const mlir::Value load =
805 builder.createLoad(getLoc(loc), lv.getVectorAddress());
806 return RValue::get(cir::VecExtractOp::create(builder, getLoc(loc), load,
807 lv.getVectorIdx()));
808 }
809
810 if (lv.isExtVectorElt())
812
813 cgm.errorNYI(loc, "emitLoadOfLValue");
814 return RValue::get(nullptr);
815}
816
817int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx,
818 const mlir::ArrayAttr elts) {
819 auto elt = mlir::cast<mlir::IntegerAttr>(elts[idx]);
820 return elt.getInt();
821}
822
823// If this is a reference to a subset of the elements of a vector, create an
824// appropriate shufflevector.
826 mlir::Location loc = lv.getExtVectorPointer().getLoc();
827 mlir::Value vec = builder.createLoad(loc, lv.getExtVectorAddress());
828
829 // HLSL allows treating scalars as one-element vectors. Converting the scalar
830 // IR value to a vector here allows the rest of codegen to behave as normal.
831 if (getLangOpts().HLSL && !mlir::isa<cir::VectorType>(vec.getType())) {
832 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: HLSL");
833 return {};
834 }
835
836 const mlir::ArrayAttr elts = lv.getExtVectorElts();
837
838 // If the result of the expression is a non-vector type, we must be extracting
839 // a single element. Just codegen as an extractelement.
840 const auto *exprVecTy = lv.getType()->getAs<clang::VectorType>();
841 if (!exprVecTy) {
842 int64_t indexValue = getAccessedFieldNo(0, elts);
843 cir::ConstantOp index =
844 builder.getConstInt(loc, builder.getSInt64Ty(), indexValue);
845 return RValue::get(cir::VecExtractOp::create(builder, loc, vec, index));
846 }
847
848 // Always use shuffle vector to try to retain the original program structure
850 for (auto i : llvm::seq<unsigned>(0, exprVecTy->getNumElements()))
851 mask.push_back(getAccessedFieldNo(i, elts));
852
853 cir::VecShuffleOp resultVec = builder.createVecShuffle(loc, vec, mask);
854 if (lv.getType()->isExtVectorBoolType()) {
855 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: ExtVectorBoolType");
856 return {};
857 }
858
859 return RValue::get(resultVec);
860}
861
862LValue
864 assert((e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) &&
865 "unexpected binary operator opcode");
866
867 Address baseAddr = Address::invalid();
868 if (e->getOpcode() == BO_PtrMemD)
869 baseAddr = emitLValue(e->getLHS()).getAddress();
870 else
871 baseAddr = emitPointerWithAlignment(e->getLHS());
872
873 const auto *memberPtrTy = e->getRHS()->getType()->castAs<MemberPointerType>();
874
875 mlir::Value memberPtr = emitScalarExpr(e->getRHS());
876
877 LValueBaseInfo baseInfo;
879 Address memberAddr = emitCXXMemberDataPointerAddress(e, baseAddr, memberPtr,
880 memberPtrTy, &baseInfo);
881
882 return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo);
883}
884
885/// Generates lvalue for partial ext_vector access.
// Computes the Address of the first accessed component of the ext-vector
// lvalue `lv`, viewed through the vector's element type.
// NOTE(review): this doxygen extraction dropped the signature line (886);
// the body reads `lv` and `loc` and returns an Address — recover the exact
// name and parameter list from the original file before compiling.
887                                                    mlir::Location loc) {
888  Address vectorAddress = lv.getExtVectorAddress();
889  QualType elementTy = lv.getType()->castAs<VectorType>()->getElementType();
890  mlir::Type vectorElementTy = cgm.getTypes().convertType(elementTy);
// Reinterpret the vector storage as a pointer to its scalar element type so
// an individual component can be indexed.
891  Address castToPointerElement =
892      vectorAddress.withElementType(builder, vectorElementTy);
893
894  mlir::ArrayAttr extVecElts = lv.getExtVectorElts();
895  unsigned idx = getAccessedFieldNo(0, extVecElts);
896  mlir::Value idxValue =
897      builder.getConstInt(loc, mlir::cast<cir::IntType>(ptrDiffTy), idx);
898
899  mlir::Value elementValue = builder.getArrayElement(
900      loc, loc, castToPointerElement.getPointer(), vectorElementTy, idxValue,
901      /*shouldDecay=*/false);
902
// The element's alignment is the base alignment at byte offset idx * size.
903  const CharUnits eltSize = getContext().getTypeSizeInChars(elementTy);
904  const CharUnits alignment =
905      castToPointerElement.getAlignment().alignmentAtOffset(idx * eltSize);
906  return Address(elementValue, vectorElementTy, alignment);
907}
908
909static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
911 return cgm.getAddrOfFunction(gd);
912}
913
// Static helper forwarding a lambda capture field to
// CIRGenFunction::emitLValueForLambdaField.
// NOTE(review): this doxygen extraction dropped the signature line (914);
// the body forwards `fd` and `thisValue` through `cgf` — recover the exact
// helper name and parameter list from the original file before compiling.
915                                          mlir::Value thisValue) {
916  return cgf.emitLValueForLambdaField(fd, thisValue);
917}
918
// NOTE(review): doxygen-extraction artifact — original lines 922 (signature
// start) and 936 (the expression initializing lambdaTagType, presumably
// derived from the lambda's record decl — TODO confirm) were dropped.
// Visible behavior: validate that we are inside the lambda's own method,
// reject explicit-object member functions (NYI), form a natural-alignment
// lvalue for *this of the lambda closure type, then project the capture field.
919 /// Given that we are currently emitting a lambda, emit an l-value for
920 /// one of its members.
921 ///
923 mlir::Value thisValue) {
924 bool hasExplicitObjectParameter = false;
925 const auto *methD = dyn_cast_if_present<CXXMethodDecl>(curCodeDecl);
926 LValue lambdaLV;
927 if (methD) {
928 hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction();
929 assert(methD->getParent()->isLambda());
930 assert(methD->getParent() == field->getParent());
931 }
932 if (hasExplicitObjectParameter) {
933 cgm.errorNYI(field->getSourceRange(), "ExplicitObjectMemberFunction");
934 } else {
935 QualType lambdaTagType =
937 lambdaLV = makeNaturalAlignAddrLValue(thisValue, lambdaTagType);
938 }
939 return emitLValueForField(lambdaLV, field);
940}
941
945
// Build an lvalue that refers to a function: materialize a cir.get_global of
// the function symbol and, if the emitted cir.func type differs from the
// declared AST type (e.g. for unprototyped functions), bitcast the pointer to
// the declared type.
// NOTE(review): doxygen-extraction artifact — original lines 953 and 969 were
// dropped (likely a missing-feature assert and the final AlignmentSource
// argument of makeAddrLValue — TODO confirm); leading integers are rendered
// line numbers.
946 static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
947 GlobalDecl gd) {
948 const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
949 cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
950 mlir::Location loc = cgf.getLoc(e->getSourceRange());
951 CharUnits align = cgf.getContext().getDeclAlign(fd);
952
954
955 mlir::Type fnTy = funcOp.getFunctionType();
956 mlir::Type ptrTy = cir::PointerType::get(fnTy);
957 mlir::Value addr = cir::GetGlobalOp::create(cgf.getBuilder(), loc, ptrTy,
958 funcOp.getSymName());
959
// Declared type may disagree with the emitted cir.func type; cast if so.
960 if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
961 fnTy = cgf.convertType(fd->getType());
962 ptrTy = cir::PointerType::get(fnTy);
963
964 addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
965 cir::CastKind::bitcast, addr);
966 }
967
968 return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
970}
971
// NOTE(review): doxygen-extraction artifact — original lines 976 (signature
// start) and 983 (the enclosing-scope condition guarding the first
// "return false", presumably testing e->refersToEnclosingVariableOrCapture()
// — TODO confirm) were dropped; leading integers are rendered line numbers.
972 /// Determine whether we can emit a reference to \p vd from the current
973 /// context, despite not necessarily having seen an odr-use of the variable in
974 /// this context.
975 /// TODO(cir): This could be shared with classic codegen.
977 const DeclRefExpr *e,
978 const VarDecl *vd) {
979 // For a variable declared in an enclosing scope, do not emit a spurious
980 // reference even if we have a capture, as that will emit an unwarranted
981 // reference to our capture state, and will likely generate worse code than
982 // emitting a local copy.
984 return false;
985
986 // For a local declaration declared in this function, we can always reference
987 // it even if we don't have an odr-use.
988 if (vd->hasLocalStorage()) {
989 return vd->getDeclContext() ==
990 dyn_cast_or_null<DeclContext>(cgf.curCodeDecl);
991 }
992
993 // For a global declaration, we can emit a reference to it if we know
994 // for sure that we are able to emit a definition of it.
995 vd = vd->getDefinition(cgf.getContext());
996 if (!vd)
997 return false;
998
999 // Don't emit a spurious reference if it might be to a variable that only
1000 // exists on a different device / target.
1001 // FIXME: This is unnecessarily broad. Check whether this would actually be a
1002 // cross-target reference.
1003 if (cgf.getLangOpts().OpenMP || cgf.getLangOpts().CUDA ||
1004 cgf.getLangOpts().OpenCL) {
1005 return false;
1006 }
1007
1008 // We can emit a spurious reference only if the linkage implies that we'll
1009 // be emitting a non-interposable symbol that will be retained until link
1010 // time.
1011 switch (cgf.cgm.getCIRLinkageVarDefinition(vd)) {
1012 case cir::GlobalLinkageKind::ExternalLinkage:
1013 case cir::GlobalLinkageKind::LinkOnceODRLinkage:
1014 case cir::GlobalLinkageKind::WeakODRLinkage:
1015 case cir::GlobalLinkageKind::InternalLinkage:
1016 case cir::GlobalLinkageKind::PrivateLinkage:
1017 return true;
1018 default:
1019 return false;
1020 }
1021}
1022
// CIRGenFunction::emitDeclRefLValue — emit an lvalue for a DeclRefExpr.
// NOTE(review): doxygen-extraction artifact — the signature line (original
// 1023) and numerous interior lines (1063, 1071-1072, 1076-1077, 1093-1098,
// 1129-1131, 1142, 1154, 1166, 1171, ...) were dropped by the extractor;
// the leading integers are rendered line numbers, not code.
// Visible structure: handle constant non-odr-used variables (spill to an
// unnamed global or materialize a constant pointer), lambda-captured
// variables, global/static-local/local VarDecls, structured-binding decls,
// FunctionDecls, MSGuidDecl (NYI), and template parameter objects.
1024 const NamedDecl *nd = e->getDecl();
1025 QualType ty = e->getType();
1026
1027 assert(e->isNonOdrUse() != NOUR_Unevaluated &&
1028 "should not emit an unevaluated operand");
1029
1030 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
1031 // Global Named registers access via intrinsics only
1032 if (vd->getStorageClass() == SC_Register && vd->hasAttr<AsmLabelAttr>() &&
1033 !vd->isLocalVarDecl()) {
1034 cgm.errorNYI(e->getSourceRange(),
1035 "emitDeclRefLValue: Global Named registers access");
1036 return LValue();
1037 }
1038
1039 if (e->isNonOdrUse() == NOUR_Constant &&
1040 (vd->getType()->isReferenceType() ||
1041 !canEmitSpuriousReferenceToVariable(*this, e, vd))) {
1042 vd->getAnyInitializer(vd);
1043 mlir::Attribute val = ConstantEmitter(*this).emitAbstract(
1044 e->getLocation(), *vd->evaluateValue(), vd->getType());
1045 assert(val && "failed to emit constant expression");
1046
1047 Address addr = Address::invalid();
1048 if (!vd->getType()->isReferenceType()) {
1049 // Spill the constant value to a global.
1050 addr = cgm.createUnnamedGlobalFrom(*vd, val,
1051 getContext().getDeclAlign(vd));
1052 mlir::Type varTy = getTypes().convertTypeForMem(vd->getType());
1053 auto ptrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
1054 if (ptrTy.getPointee() != varTy) {
1055 addr = addr.withElementType(builder, varTy);
1056 }
1057 } else {
1058 // Should we be using the alignment of the constant pointer we emitted?
1059 CharUnits alignment = cgm.getNaturalTypeAlignment(
1060 e->getType(), /*baseInfo=*/nullptr, /*forPointeeType=*/true);
1061 // Classic codegen passes TBAA as null-ptr to the above function, so it
1062 // probably needs to deal with that.
1064 mlir::Value ptrVal = getBuilder().getConstant(
1065 getLoc(e->getSourceRange()), mlir::cast<mlir::TypedAttr>(val));
1066 addr = makeNaturalAddressForPointer(ptrVal, ty, alignment);
1067 }
1068 return makeAddrLValue(addr, ty, AlignmentSource::Decl);
1069 }
1070
1071 // Check for captured variables.
1073 vd = vd->getCanonicalDecl();
1074 if (FieldDecl *fd = lambdaCaptureFields.lookup(vd))
1075 return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
1078 }
1079 }
1080
1081 // FIXME: We should be able to assert this for FunctionDecls as well!
1082 // FIXME: We should be able to assert this for all DeclRefExprs, not just
1083 // those with a valid source location.
1084 assert((nd->isUsed(false) || !isa<VarDecl>(nd) || e->isNonOdrUse() ||
1085 !e->getLocation().isValid()) &&
1086 "Should not use decl without marking it used!");
1087
1088 if (nd->hasAttr<WeakRefAttr>())
1089 cgm.errorNYI(nd->getSourceRange(), "emitGlobal: WeakRefAttr");
1090
1091 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
1092 // Checks for omitted feature handling
1099
1100 // Check if this is a global variable
1101 if (vd->hasLinkage() || vd->isStaticDataMember())
1102 return emitGlobalVarDeclLValue(*this, e, vd);
1103
1104 Address addr = Address::invalid();
1105
1106 // The variable should generally be present in the local decl map.
1107 auto iter = localDeclMap.find(vd);
1108 if (iter != localDeclMap.end()) {
1109 addr = iter->second;
1110
1111 } else if (vd->isStaticLocal()) {
1112 // Otherwise, it might be static local we haven't emitted yet for some
1113 // reason; most likely, because it's in an outer function.
1114 cir::GlobalOp var =
1115 cgm.getOrCreateStaticVarDecl(*vd, cgm.getCIRLinkageVarDefinition(vd));
1116 mlir::Value getGlobVal = builder.createGetGlobal(var);
1117 auto getGlob = getGlobVal.getDefiningOp<cir::GetGlobalOp>();
1118 getGlob.setStaticLocal(var.getStaticLocalGuard().has_value());
1119 getGlob.setTls(vd->getTLSKind() != VarDecl::TLS_None);
1120 addr = Address(getGlob, convertTypeForMem(vd->getType()),
1121 getContext().getDeclAlign(vd));
1122 } else {
1123 llvm_unreachable("DeclRefExpr for Decl not entered in localDeclMap?");
1124 }
1125
1126 // Drill into reference types.
1127 LValue lv =
1128 vd->getType()->isReferenceType()
1132
1133 // Statics are defined as globals, so they are not include in the function's
1134 // symbol table.
1135 assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
1136 "non-static locals should be already mapped");
1137
1138 return lv;
1139 }
1140
1141 if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
1143 FieldDecl *fd = lambdaCaptureFields.lookup(bd);
1144 return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
1145 }
1146 return emitLValue(bd->getBinding());
1147 }
1148
1149 if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
1150 LValue lv = emitFunctionDeclLValue(*this, e, fd);
1151
1152 // Emit debuginfo for the function declaration if the target wants to.
1153 if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
1155
1156 return lv;
1157 }
1158 if (isa<MSGuidDecl>(nd))
1159 cgm.errorNYI(e->getSourceRange(),
1160 "emitDeclRefLValue: unhandled MS Guid Decl");
1161
1162 if (const auto *tpo = dyn_cast<TemplateParamObjectDecl>(nd)) {
1163 CharUnits alignment = cgm.getNaturalTypeAlignment(tpo->getType());
1164 cir::GetGlobalOp atpo =
1165 builder.createGetGlobal(cgm.getAddrOfTemplateParamObject(tpo));
1167 "Do an address space conversion if necessary");
1168
1169 return makeAddrLValue(
1170 Address(atpo, convertTypeForMem(tpo->getType()), alignment), ty,
1172 }
1173
1174 llvm_unreachable("Unhandled DeclRefExpr");
1175}
1176
// Evaluate an expression and convert the result to a CIR boolean value.
// Member-pointer operands are NYI; scalar operands go through scalar
// conversion; the trailing (partially dropped) path handles complex operands.
// NOTE(review): doxygen-extraction artifact — the signature line (original
// 1177), line 1181, and line 1192 (the complex-to-bool call whose trailing
// "loc);" argument survives below) were dropped by the extractor.
1178 QualType boolTy = getContext().BoolTy;
1179 SourceLocation loc = e->getExprLoc();
1180
1182 if (e->getType()->getAs<MemberPointerType>()) {
1183 cgm.errorNYI(e->getSourceRange(),
1184 "evaluateExprAsBool: member pointer type");
1185 return createDummyValue(getLoc(loc), boolTy);
1186 }
1187
1188 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1189 if (!e->getType()->isAnyComplexType())
1190 return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);
1191
1193 loc);
1194}
1195
// CIRGenFunction::emitUnaryOpLValue — emit an lvalue for unary operators that
// can yield one: __extension__, *deref, __real/__imag, and pre-inc/dec.
// NOTE(review): doxygen-extraction artifact — the signature line (original
// 1196) and several interior lines (1205, 1208, 1219-1220, 1236, 1243,
// 1255, 1257) were dropped by the extractor; leading integers are rendered
// line numbers, not code.
1197 UnaryOperatorKind op = e->getOpcode();
1198
1199 // __extension__ doesn't affect lvalue-ness.
1200 if (op == UO_Extension)
1201 return emitLValue(e->getSubExpr());
1202
1203 switch (op) {
1204 case UO_Deref: {
1206 assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1207
1209 LValueBaseInfo baseInfo;
1210 Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);
1211
1212 // Tag 'load' with deref attribute.
1213 // FIXME: This misses some derefence cases and has problematic interactions
1214 // with other operators.
1215 if (auto loadOp = addr.getDefiningOp<cir::LoadOp>())
1216 loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));
1217
1218 LValue lv = makeAddrLValue(addr, t, baseInfo);
1221 return lv;
1222 }
1223 case UO_Real:
1224 case UO_Imag: {
1225 LValue lv = emitLValue(e->getSubExpr());
1226 assert(lv.isSimple() && "real/imag on non-ordinary l-value");
1227
1228 // __real is valid on scalars. This is a faster way of testing that.
1229 // __imag can only produce an rvalue on scalars.
1230 if (e->getOpcode() == UO_Real &&
1231 !mlir::isa<cir::ComplexType>(lv.getAddress().getElementType())) {
1232 assert(e->getSubExpr()->getType()->isArithmeticType());
1233 return lv;
1234 }
1235
1237 QualType elemTy = exprTy->castAs<clang::ComplexType>()->getElementType();
1238 mlir::Location loc = getLoc(e->getExprLoc());
1239 Address component =
1240 e->getOpcode() == UO_Real
1241 ? builder.createComplexRealPtr(loc, lv.getAddress())
1242 : builder.createComplexImagPtr(loc, lv.getAddress());
1244 LValue elemLV = makeAddrLValue(component, elemTy);
1245 elemLV.getQuals().addQualifiers(lv.getQuals());
1246 return elemLV;
1247 }
1248 case UO_PreInc:
1249 case UO_PreDec: {
1250 LValue lv = emitLValue(e->getSubExpr());
1251
1252 assert(e->isPrefix() && "Prefix operator in unexpected state!");
1253
1254 if (e->getType()->isAnyComplexType())
1256 else
1258
1259 return lv;
1260 }
1261 case UO_Extension:
1262 llvm_unreachable("UnaryOperator extension should be handled above!");
1263 case UO_Plus:
1264 case UO_Minus:
1265 case UO_Not:
1266 case UO_LNot:
1267 case UO_AddrOf:
1268 case UO_PostInc:
1269 case UO_PostDec:
1270 case UO_Coawait:
1271 llvm_unreachable("UnaryOperator of non-lvalue kind!");
1272 }
1273 llvm_unreachable("Unknown unary operator kind!");
1274}
1275
1276/// If the specified expr is a simple decay from an array to pointer,
1277/// return the array subexpression.
1278/// FIXME: this could be abstracted into a common AST helper.
1279static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
1280 // If this isn't just an array->pointer decay, bail out.
1281 const auto *castExpr = dyn_cast<CastExpr>(e);
1282 if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
1283 return nullptr;
1284
1285 // If this is a decay from variable width array, bail out.
1286 const Expr *subExpr = castExpr->getSubExpr();
1287 if (subExpr->getType()->isVariableArrayType())
1288 return nullptr;
1289
1290 return subExpr;
1291}
1292
1293static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
1294 // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr?
1295 if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
1296 return constantOp.getValueAttr<cir::IntAttr>();
1297 return {};
1298}
1299
1300static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
1301 CharUnits eltSize) {
1302 // If we have a constant index, we can use the exact offset of the
1303 // element we're accessing.
1304 if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
1305 const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
1306 return arrayAlign.alignmentAtOffset(offset);
1307 }
1308 // Otherwise, use the worst-case alignment for any element.
1309 return arrayAlign.alignmentOfArrayElement(eltSize);
1310}
1311
1313 const VariableArrayType *vla) {
1314 QualType eltType;
1315 do {
1316 eltType = vla->getElementType();
1317 } while ((vla = astContext.getAsVariableArrayType(eltType)));
1318 return eltType;
1319}
1320
// Emit the raw element pointer for an array subscript by delegating to the
// builder's getArrayElement helper.
// NOTE(review): doxygen-extraction artifact — original lines 1321 (signature
// start, presumably naming a static emitArraySubscriptPtr taking a
// CIRGenFunction& — TODO confirm) and 1329 were dropped by the extractor.
1322 mlir::Location beginLoc,
1323 mlir::Location endLoc, mlir::Value ptr,
1324 mlir::Type eltTy, mlir::Value idx,
1325 bool shouldDecay) {
1326 CIRGenModule &cgm = cgf.getCIRGenModule();
1327 // TODO(cir): LLVM codegen emits in bound gep check here, is there anything
1328 // that would enhance tracking this later in CIR?
1330 return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
1331 shouldDecay);
1332}
1333
// Address-returning overload: compute the element address for a subscript,
// deriving the element alignment from the base address, the (possibly
// constant) index, and the fixed-size element type.
// NOTE(review): doxygen-extraction artifact — original lines 1334 (signature
// start) and 1352 were dropped by the extractor; leading integers are
// rendered line numbers, not code.
1335 mlir::Location beginLoc,
1336 mlir::Location endLoc, Address addr,
1337 QualType eltType, mlir::Value idx,
1338 mlir::Location loc, bool shouldDecay) {
1339
1340 // Determine the element size of the statically-sized base. This is
1341 // the thing that the indices are expressed in terms of.
1342 if (const VariableArrayType *vla =
1343 cgf.getContext().getAsVariableArrayType(eltType)) {
1344 eltType = getFixedSizeElementType(cgf.getContext(), vla);
1345 }
1346
1347 // We can use that to compute the best alignment of the element.
1348 const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
1349 const CharUnits eltAlign =
1350 getArrayElementAlign(addr.getAlignment(), idx, eltSize);
1351
1353 const mlir::Value eltPtr =
1354 emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
1355 addr.getElementType(), idx, shouldDecay);
1356 const mlir::Type elementType = cgf.convertTypeForMem(eltType);
1357 return Address(eltPtr, elementType, eltAlign);
1358}
1359
// CIRGenFunction::emitArraySubscriptExpr — emit an lvalue for base[idx].
// Handles, in order: ObjC object bases (NYI), subscriptable vector bases,
// HLSL (NYI), ext_vector element bases, VLA bases (scaling the index by the
// VLA element count), simple array-decay bases, and plain pointer bases.
// NOTE(review): doxygen-extraction artifact — the signature line (original
// 1361) and several interior lines (1364, 1375, 1382, 1401, 1418, 1436,
// 1447-1448) were dropped; leading integers are rendered line numbers.
1360 LValue
1362 if (e->getType()->getAs<ObjCObjectType>()) {
1363 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
1365 }
1366
1367 // The index must always be an integer, which is not an aggregate. Emit it
1368 // in lexical order (this complexity is, sadly, required by C++17).
1369 assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
1370 "index was neither LHS nor RHS");
1371
1372 auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
1373 mlir::Value idx = emitScalarExpr(e->getIdx());
1374
1376
1377 // Extend or truncate the index type to pointer-sized integer.
1378 if (promote) {
1379 // Choose the type we extend or truncate to based on the signedness of the
1380 // index type.
1381 mlir::Type desiredIdxTy =
1383 ? ptrDiffTy
1384 : uIntPtrTy;
1385
1386 if (idx.getType() != desiredIdxTy) {
1387 cir::CastKind kind = mlir::isa<cir::BoolType>(idx.getType())
1388 ? cir::CastKind::bool_to_int
1389 : cir::CastKind::integral;
1390 idx = builder.createOrFold<cir::CastOp>(idx.getLoc(), desiredIdxTy,
1391 kind, idx);
1392 }
1393 }
1394
1395 return idx;
1396 };
1397
1398 // If the base is a vector type, then we are forming a vector element
1399 // with this subscript.
1400 if (e->getBase()->getType()->isSubscriptableVectorType() &&
1402 const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
1403 const LValue lv = emitLValue(e->getBase());
1404 return LValue::makeVectorElt(lv.getAddress(), idx, e->getBase()->getType(),
1405 lv.getBaseInfo());
1406 }
1407
1408 // The HLSL runtime handles subscript expressions on global resource arrays
1409 // and objects with HLSL buffer layouts.
1410 if (getLangOpts().HLSL) {
1411 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: HLSL");
1412 return {};
1413 }
1414
1415 mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
1416
1417 // Handle the extvector case we ignored above.
1419 const LValue lv = emitLValue(e->getBase());
1420 Address addr = emitExtVectorElementLValue(lv, cgm.getLoc(e->getExprLoc()));
1421
1422 QualType elementType = lv.getType()->castAs<VectorType>()->getElementType();
1423 addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
1424 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
1425 idx, cgm.getLoc(e->getExprLoc()),
1426 /*shouldDecay=*/false);
1427
1428 return makeAddrLValue(addr, elementType, lv.getBaseInfo());
1429 }
1430
1431 if (const VariableArrayType *vla =
1432 getContext().getAsVariableArrayType(e->getType())) {
1433 // The base must be a pointer, which is not an aggregate. Emit
1434 // it. It needs to be emitted first in case it's what captures
1435 // the VLA bounds.
1437
1438 // The element count here is the total number of non-VLA elements.
1439 mlir::Value numElements = getVLASize(vla).numElts;
1440 idx = builder.createIntCast(idx, numElements.getType());
1441
1442 // Effectively, the multiply by the VLA size is part of the GEP.
1443 // GEP indexes are signed, and scaling an index isn't permitted to
1444 // signed-overflow, so we use the same semantics for our explicit
1445 // multiply. We suppress this if overflow is not undefined behavior.
1446 OverflowBehavior overflowBehavior = getLangOpts().PointerOverflowDefined
1449 idx = builder.createMul(cgm.getLoc(e->getExprLoc()), idx, numElements,
1450 overflowBehavior);
1451
1452 addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
1453 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
1454 idx, cgm.getLoc(e->getExprLoc()),
1455 /*shouldDecay=*/false);
1456
1457 return makeAddrLValue(addr, vla->getElementType(), LValueBaseInfo());
1458 }
1459
1460 if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
1461 LValue arrayLV;
1462 if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
1463 arrayLV = emitArraySubscriptExpr(ase);
1464 else
1465 arrayLV = emitLValue(array);
1466
1467 // Propagate the alignment from the array itself to the result.
1468 const Address addr = emitArraySubscriptPtr(
1469 *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
1470 arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1471 /*shouldDecay=*/true);
1472
1473 const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
1474
1475 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1476 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1477 }
1478
1479 return lv;
1480 }
1481
1482 // The base must be a pointer; emit it with an estimate of its alignment.
1483 assert(e->getBase()->getType()->isPointerType() &&
1484 "The base must be a pointer");
1485
1486 LValueBaseInfo eltBaseInfo;
1487 const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
1488 // Propagate the alignment from the array itself to the result.
1489 const Address addxr = emitArraySubscriptPtr(
1490 *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
1491 e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1492 /*shouldDecay=*/false);
1493
1494 const LValue lv = LValue::makeAddr(addxr, e->getType(), eltBaseInfo);
1495
1496 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1497 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1498 }
1499
1500 return lv;
1501}
1502
// CIRGenFunction::emitExtVectorElementExpr — emit an lvalue for an
// ext_vector element/swizzle access (e.g. v.xyz). The base may be a pointer
// to a vector (->), a vector glvalue, or a vector rvalue that is spilled to a
// temporary so it has an address.
// NOTE(review): doxygen-extraction artifact — the signature line (original
// 1503) and interior lines 1514, 1541, 1544, 1561 were dropped; leading
// integers are rendered line numbers, not code.
1504 // Emit the base vector as an l-value.
1505 LValue base;
1506
1507 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
1508 if (e->isArrow()) {
1509 // If it is a pointer to a vector, emit the address and form an lvalue with
1510 // it.
1511 LValueBaseInfo baseInfo;
1512 Address ptr = emitPointerWithAlignment(e->getBase(), &baseInfo);
1513 const auto *clangPtrTy =
1515 base = makeAddrLValue(ptr, clangPtrTy->getPointeeType(), baseInfo);
1516 base.getQuals().removeObjCGCAttr();
1517 } else if (e->getBase()->isGLValue()) {
1518 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
1519 // emit the base as an lvalue.
1520 assert(e->getBase()->getType()->isVectorType());
1521 base = emitLValue(e->getBase());
1522 } else {
1523 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
1524 assert(e->getBase()->getType()->isVectorType() &&
1525 "Result must be a vector");
1526 mlir::Value vec = emitScalarExpr(e->getBase());
1527
1528 // Store the vector to memory (because LValue wants an address).
1529 QualType baseTy = e->getBase()->getType();
1530 Address vecMem = createMemTemp(baseTy, vec.getLoc(), "tmp");
1531 if (!getLangOpts().HLSL && baseTy->isExtVectorBoolType()) {
1532 cgm.errorNYI(e->getSourceRange(),
1533 "emitExtVectorElementExpr: ExtVectorBoolType & !HLSL");
1534 return {};
1535 }
1536 builder.createStore(vec.getLoc(), vec, vecMem);
1537 base = makeAddrLValue(vecMem, baseTy, AlignmentSource::Decl);
1538 }
1539
1540 QualType type =
1542
1543 // Encode the element access list into a vector of unsigned indices.
1545 e->getEncodedElementAccess(indices);
1546
1547 if (base.isSimple()) {
1548 SmallVector<int64_t> attrElts(indices.begin(), indices.end());
1549 mlir::ArrayAttr elts = builder.getI64ArrayAttr(attrElts);
1550 return LValue::makeExtVectorElt(base.getAddress(), elts, type,
1551 base.getBaseInfo());
1552 }
1553
1554 if (base.isMatrixRow()) {
1555 cgm.errorNYI(e->getSourceRange(), "emitExtVectorElementExpr: isMatrixRow");
1556 return {};
1557 }
1558
// Swizzle-of-swizzle: remap the new indices through the base's element list.
1559 assert(base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
1560 mlir::ArrayAttr baseElts = base.getExtVectorElts();
1562 for (unsigned idx : indices)
1563 elts.push_back(getAccessedFieldNo(idx, baseElts));
1564 mlir::ArrayAttr cv = builder.getI64ArrayAttr(elts);
1565 return LValue::makeExtVectorElt(base.getAddress(), cv, type,
1566 base.getBaseInfo());
1567}
1568
// Emit an lvalue for a string literal: fetch (or create) the backing global,
// take its address via cir.get_global, and wrap it with the global's
// alignment.
// NOTE(review): doxygen-extraction artifact — the signature start (original
// line 1569) and line 1578 (the trailing makeAddrLValue arguments) were
// dropped by the extractor.
1570 llvm::StringRef name) {
1571 cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e, name);
1572 assert(globalOp.getAlignment() && "expected alignment for string literal");
1573 unsigned align = *(globalOp.getAlignment());
1574 mlir::Value addr =
1575 builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
1576 return makeAddrLValue(
1577 Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
1579}
1580
// NOTE(review): doxygen-extraction artifact — the signature line (original
// 1587, presumably LValue CIRGenFunction::emitCastLValue(const CastExpr *e)
// — TODO confirm) and several interior lines (1651, 1692, 1703, 1714, 1755,
// 1764, 1769, 1771) were dropped; leading integers are rendered line numbers.
1581 /// Casts are never lvalues unless that cast is to a reference type. If the cast
1582 /// is to a reference, we can have the usual lvalue result, otherwise if a cast
1583 /// is needed by the code generator in an lvalue context, then it must mean that
1584 /// we need the address of an aggregate in order to access one of its members.
1585 /// This can happen for all the reasons that casts are permitted with aggregate
1586 /// result, including noop aggregate casts, and cast from scalar to union.
1588 switch (e->getCastKind()) {
1589 case CK_ToVoid:
1590 case CK_BitCast:
1591 case CK_LValueToRValueBitCast:
1592 case CK_ArrayToPointerDecay:
1593 case CK_FunctionToPointerDecay:
1594 case CK_NullToMemberPointer:
1595 case CK_NullToPointer:
1596 case CK_IntegralToPointer:
1597 case CK_PointerToIntegral:
1598 case CK_PointerToBoolean:
1599 case CK_IntegralCast:
1600 case CK_BooleanToSignedIntegral:
1601 case CK_IntegralToBoolean:
1602 case CK_IntegralToFloating:
1603 case CK_FloatingToIntegral:
1604 case CK_FloatingToBoolean:
1605 case CK_FloatingCast:
1606 case CK_FloatingRealToComplex:
1607 case CK_FloatingComplexToReal:
1608 case CK_FloatingComplexToBoolean:
1609 case CK_FloatingComplexCast:
1610 case CK_FloatingComplexToIntegralComplex:
1611 case CK_IntegralRealToComplex:
1612 case CK_IntegralComplexToReal:
1613 case CK_IntegralComplexToBoolean:
1614 case CK_IntegralComplexCast:
1615 case CK_IntegralComplexToFloatingComplex:
1616 case CK_DerivedToBaseMemberPointer:
1617 case CK_BaseToDerivedMemberPointer:
1618 case CK_MemberPointerToBoolean:
1619 case CK_ReinterpretMemberPointer:
1620 case CK_AnyPointerToBlockPointerCast:
1621 case CK_ARCProduceObject:
1622 case CK_ARCConsumeObject:
1623 case CK_ARCReclaimReturnedObject:
1624 case CK_ARCExtendBlockObject:
1625 case CK_CopyAndAutoreleaseBlockObject:
1626 case CK_IntToOCLSampler:
1627 case CK_FloatingToFixedPoint:
1628 case CK_FixedPointToFloating:
1629 case CK_FixedPointCast:
1630 case CK_FixedPointToBoolean:
1631 case CK_FixedPointToIntegral:
1632 case CK_IntegralToFixedPoint:
1633 case CK_MatrixCast:
1634 case CK_HLSLVectorTruncation:
1635 case CK_HLSLMatrixTruncation:
1636 case CK_HLSLArrayRValue:
1637 case CK_HLSLElementwiseCast:
1638 case CK_HLSLAggregateSplatCast:
1639 llvm_unreachable("unexpected cast lvalue");
1640
1641 case CK_Dependent:
1642 llvm_unreachable("dependent cast kind in IR gen!");
1643
1644 case CK_BuiltinFnToFnPtr:
1645 llvm_unreachable("builtin functions are handled elsewhere");
1646
1647 case CK_Dynamic: {
1648 LValue lv = emitLValue(e->getSubExpr());
1649 Address v = lv.getAddress();
1650 const auto *dce = cast<CXXDynamicCastExpr>(e);
1652 }
1653
1654 // These are never l-values; just use the aggregate emission code.
1655 case CK_ToUnion:
1656 return emitAggExprToLValue(e);
1657
1658 case CK_ConstructorConversion:
1659 case CK_UserDefinedConversion:
1660 case CK_CPointerToObjCPointerCast:
1661 case CK_BlockPointerToObjCPointerCast:
1662 case CK_LValueToRValue:
1663 return emitLValue(e->getSubExpr());
1664
1665 case CK_NonAtomicToAtomic:
1666 case CK_AtomicToNonAtomic:
1667 case CK_ObjCObjectLValueCast:
1668 case CK_VectorSplat: {
1669 cgm.errorNYI(e->getSourceRange(),
1670 std::string("emitCastLValue for unhandled cast kind: ") +
1671 e->getCastKindName());
1672
1673 return {};
1674 }
1675
1676 case CK_AddressSpaceConversion: {
1677 LValue lv = emitLValue(e->getSubExpr());
1678 QualType destTy = getContext().getPointerType(e->getType());
1679
1680 clang::LangAS srcLangAS = e->getSubExpr()->getType().getAddressSpace();
1681 mlir::ptr::MemorySpaceAttrInterface srcAS;
1682 if (clang::isTargetAddressSpace(srcLangAS))
1683 srcAS = cir::toCIRAddressSpaceAttr(getMLIRContext(), srcLangAS);
1684 else
1685 cgm.errorNYI(
1686 e->getSourceRange(),
1687 "emitCastLValue: address space conversion from unknown address "
1688 "space");
1689
1690 mlir::Value v = performAddrSpaceCast(lv.getPointer(), convertType(destTy));
1691
1693 lv.getAddress().getAlignment()),
1694 e->getType(), lv.getBaseInfo());
1695 }
1696
1697 case CK_LValueBitCast: {
1698 // This must be a reinterpret_cast (or c-style equivalent).
1699 const auto *ce = cast<ExplicitCastExpr>(e);
1700
1701 cgm.emitExplicitCastExprType(ce, this);
1702 LValue LV = emitLValue(e->getSubExpr());
1704 builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
1705
1706 return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
1707 }
1708
1709 case CK_NoOp: {
1710 // CK_NoOp can model a qualification conversion, which can remove an array
1711 // bound and change the IR type.
1712 LValue lv = emitLValue(e->getSubExpr());
1713 // Propagate the volatile qualifier to LValue, if exists in e.
1715 lv.getQuals() = e->getType().getQualifiers();
1716 if (lv.isSimple()) {
1717 Address v = lv.getAddress();
1718 if (v.isValid()) {
1719 mlir::Type ty = convertTypeForMem(e->getType());
1720 if (v.getElementType() != ty) {
1721 // We have only inspected/reproduced this with complete to incomplete
1722 // array types, so we do an NYI for other cases, so we can make sure
1723 // we're doing a conversion we want to be making.
1724 auto fromTy = dyn_cast<cir::ArrayType>(v.getElementType());
1725 auto toTy = dyn_cast<cir::ArrayType>(ty);
1726 if (!fromTy || !toTy ||
1727 fromTy.getElementType() != toTy.getElementType() ||
1728 toTy.getSize() != 0)
1729 cgm.errorNYI(e->getSourceRange(),
1730 "emitCastLValue NoOp not array-shrink case");
1731
1732 lv = makeAddrLValue(v.withElementType(builder, ty), e->getType(),
1733 lv.getBaseInfo());
1734 }
1735 }
1736 }
1737 return lv;
1738 }
1739
1740 case CK_UncheckedDerivedToBase:
1741 case CK_DerivedToBase: {
1742 auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();
1743
1744 LValue lv = emitLValue(e->getSubExpr());
1745 Address thisAddr = lv.getAddress();
1746
1747 // Perform the derived-to-base conversion
1748 Address baseAddr =
1749 getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
1750 /*NullCheckValue=*/false, e->getExprLoc());
1751
1752 // TODO: Support accesses to members of base classes in TBAA. For now, we
1753 // conservatively pretend that the complete object is of the base class
1754 // type.
1756 return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
1757 }
1758
1759 case CK_BaseToDerived: {
1760 const auto *derivedClassDecl = e->getType()->castAsCXXRecordDecl();
1761 LValue lv = emitLValue(e->getSubExpr());
1762
1763 // Perform the base-to-derived conversion
1765 getLoc(e->getSourceRange()), lv.getAddress(), derivedClassDecl,
1766 e->path(), /*NullCheckValue=*/false);
1767 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
1768 // performed and the object is not of the derived type.
1770
1772 return makeAddrLValue(derived, e->getType(), lv.getBaseInfo());
1773 }
1774
1775 case CK_ZeroToOCLOpaqueType:
1776 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
1777 }
1778
1779 llvm_unreachable("Invalid cast kind");
1780}
1781
// If the member expression refers to a VarDecl (i.e. a static data member),
// synthesize an equivalent DeclRefExpr so it can be emitted through the
// DeclRef path; return null otherwise.
// NOTE(review): doxygen-extraction artifact — original lines 1782 (signature
// start) and 1787 (the leading DeclRefExpr::Create arguments) were dropped.
1783 const MemberExpr *me) {
1784 if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
1785 // Try to emit static variable member expressions as DREs.
1786 return DeclRefExpr::Create(
1788 /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
1789 me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
1790 }
1791 return nullptr;
1792}
1793
// CIRGenFunction::emitMemberExpr — emit an lvalue for s.x / s->x. Static data
// members are rerouted through emitDeclRefLValue; otherwise the base is
// emitted (as pointer for ->, as lvalue for .) and the field is projected.
// NOTE(review): doxygen-extraction artifact — the signature line (original
// 1794) and interior lines 1796, 1805, 1808, 1811, 1818 were dropped; leading
// integers are rendered line numbers, not code.
1795 if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
1797 return emitDeclRefLValue(dre);
1798 }
1799
1800 Expr *baseExpr = e->getBase();
1801 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
1802 LValue baseLV;
1803 if (e->isArrow()) {
1804 LValueBaseInfo baseInfo;
1806 Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
1807 QualType ptrTy = baseExpr->getType()->getPointeeType();
1809 baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
1810 } else {
1812 baseLV = emitLValue(baseExpr);
1813 }
1814
1815 const NamedDecl *nd = e->getMemberDecl();
1816 if (auto *field = dyn_cast<FieldDecl>(nd)) {
1817 LValue lv = emitLValueForField(baseLV, field);
1819 if (getLangOpts().OpenMP) {
1820 // If the member was explicitly marked as nontemporal, mark it as
1821 // nontemporal. If the base lvalue is marked as nontemporal, mark access
1822 // to children as nontemporal too.
1823 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
1824 }
1825 return lv;
1826 }
1827
1828 if (const auto *fd = dyn_cast<FunctionDecl>(nd))
1829 return emitFunctionDeclLValue(*this, e, fd);
1830
1831 llvm_unreachable("Unhandled member declaration!");
1832}
1833
// NOTE(review): doxygen-extraction artifact — the signature start (original
// line 1835) and interior lines 1847-1849 (the remaining AggValueSlot::forAddr
// arguments) and 1854 (presumably the scalar RValue computation feeding 'rv'
// — TODO confirm) were dropped; leading integers are rendered line numbers.
// Dispatches on the expression's CIR evaluation kind: complex, aggregate, or
// scalar, storing the result into 'location'.
1834 /// Evaluate an expression into a given memory location.
1836 Qualifiers quals, bool isInit) {
1837 // FIXME: This function should take an LValue as an argument.
1838 switch (getEvaluationKind(e->getType())) {
1839 case cir::TEK_Complex: {
1840 LValue lv = makeAddrLValue(location, e->getType());
1841 emitComplexExprIntoLValue(e, lv, isInit);
1842 return;
1843 }
1844
1845 case cir::TEK_Aggregate: {
1846 emitAggExpr(e, AggValueSlot::forAddr(location, quals,
1850 return;
1851 }
1852
1853 case cir::TEK_Scalar: {
1855 LValue lv = makeAddrLValue(location, e->getType());
1856 emitStoreThroughLValue(rv, lv);
1857 return;
1858 }
1859 }
1860
1861 llvm_unreachable("bad evaluation kind");
1862}
1863
// Create the backing storage for a materialized temporary, dispatching on its
// storage duration: an alloca (placed next to the extending declaration's
// alloca, or in the entry block under conditional emission) for full-expression
// and automatic durations, or a global for thread/static durations.
// NOTE(review): doxygen-extraction artifact — the signature start (original
// line 1864) and interior lines 1873, 1887 (the second operand of the
// conditional-branch test), and 1891-1892 (the entry-block insert-point
// computation described by the comment above them) were dropped by the
// extractor; leading integers are rendered line numbers, not code.
1865 const MaterializeTemporaryExpr *m,
1866 const Expr *inner) {
1867 // TODO(cir): cgf.getTargetHooks();
1868 switch (m->getStorageDuration()) {
1869 case SD_FullExpression:
1870 case SD_Automatic: {
1871 QualType ty = inner->getType();
1872
1874
1875 // The temporary memory should be created in the same scope as the extending
1876 // declaration of the temporary materialization expression.
1877 cir::AllocaOp extDeclAlloca;
1878 if (const ValueDecl *extDecl = m->getExtendingDecl()) {
1879 auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
1880 if (extDeclAddrIter != cgf.localDeclMap.end())
1881 extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
1882 }
1883 mlir::OpBuilder::InsertPoint ip;
1884 if (extDeclAlloca) {
1885 ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
1886 } else if (cgf.isInConditionalBranch() &&
1888 // Place in the function entry block so the alloca dominates both
1889 // regions of any enclosing cir.cleanup.scope. The default path
1890 // would use curLexScope which may be a ternary branch.
1893 }
1894 return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
1895 cgf.getCounterRefTmpAsString(), /*alloca=*/nullptr,
1896 ip);
1897 }
1898 case SD_Thread:
1899 case SD_Static: {
1900 auto addr =
1901 mlir::cast<cir::GlobalOp>(cgf.cgm.getAddrOfGlobalTemporary(m, inner));
1902 auto getGlobal = cgf.cgm.getBuilder().createGetGlobal(addr);
1903 assert(addr.getAlignment().has_value() &&
1904 "This should always have an alignment");
1905 return Address(getGlobal,
1906 clang::CharUnits::fromQuantity(addr.getAlignment().value()));
1907 }
1908
1909 case SD_Dynamic:
1910 llvm_unreachable("temporary can't have dynamic storage duration");
1911 }
1912 llvm_unreachable("unknown storage duration");
1913}
1914
/// Register destruction of a reference-bound temporary, keyed off the
/// temporary's storage duration. Types whose destruction kind is DK_none
/// need no cleanup at all.
                                 const MaterializeTemporaryExpr *m,
                                 const Expr *e, Address referenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME(ogcg): This should be looking at e, not m.
  if (m->getType().getObjCLifetime()) {
    cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
    return;
  }

  if (dk == QualType::DK_none)
    return;

  switch (m->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    CXXDestructorDecl *referenceTemporaryDtor = nullptr;
    if (const auto *classDecl =
        classDecl && !classDecl->hasTrivialDestructor())
      // Get the destructor for the reference temporary.
      referenceTemporaryDtor = classDecl->getDestructor();

    // Trivially-destructible statics need no registered teardown.
    if (!referenceTemporaryDtor)
      return;

    cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
                                          "storage duration with destructors");
    break;
  }

  case SD_FullExpression:
    // Destroyed at the end of the enclosing full-expression.
    cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
    break;

  case SD_Automatic:
      NormalAndEHCleanup, referenceTemporary, e->getType(),
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}
1965
/// Emit an lvalue for a MaterializeTemporaryExpr: create the temporary's
/// storage, initialize it from the subexpression, register any needed
/// cleanup, and return the storage as an lvalue of the expression's type.
                                 const MaterializeTemporaryExpr *m) {
  const Expr *e = m->getSubExpr();

  assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
          !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = m->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitMaterializeTemporaryExpr: ObjCLifetime");
    return {};
  }

  // Peel off comma LHSs and subobject adjustments to find the expression
  // that actually initializes the temporary.
  e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);

  // Comma LHSs are evaluated only for their side effects.
  for (const Expr *ignored : commaLHSs)
    emitIgnoredExpr(ignored);

  if (isa<OpaqueValueExpr>(e)) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitMaterializeTemporaryExpr: OpaqueValueExpr");
    return {};
  }

  // Create and initialize the reference temporary.
  Address object = createReferenceTemporary(*this, m, e);

  if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
    // TODO(cir): add something akin to stripPointerCasts() to ptr above
    cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
    return {};
  } else {
    emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
  }
  pushTemporaryCleanup(*this, m, e, object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  if (!adjustments.empty()) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitMaterializeTemporaryExpr: Adjustments");
    return {};
  }

  return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
}
2021
/// Return the lvalue previously bound for an OpaqueValueExpr; for a unique
/// (never pre-bound) OVE, fall back to emitting its source expression.
LValue

  auto it = opaqueLValues.find(e);
  if (it != opaqueLValues.end())
    return it->second;

  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
  return emitLValue(e->getSourceExpr());
}
2033
/// Return the rvalue previously bound for an OpaqueValueExpr; for a unique
/// (never pre-bound) OVE, fall back to emitting its source expression.
RValue

  auto it = opaqueRValues.find(e);
  if (it != opaqueRValues.end())
    return it->second;

  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
  return emitAnyExpr(e->getSourceExpr());
}
2045
  // Emit a block-scope compound literal: materialize a temporary, emit the
  // initializer into it, and return the temporary as an lvalue. File-scope
  // literals and C end-of-scope destruction are NYI.
  if (e->isFileScope()) {
    cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
    return {};
  }

  if (e->getType()->isVariablyModifiedType())

  Address declPtr = createMemTemp(e->getType(), getLoc(e->getSourceRange()),
                                  ".compoundliteral");
  const Expr *initExpr = e->getInitializer();
  LValue result = makeAddrLValue(declPtr, e->getType(), AlignmentSource::Decl);

  emitAnyExprToMem(initExpr, declPtr, e->getType().getQualifiers(),
                   /*Init*/ true);

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  if (!getLangOpts().CPlusPlus && e->getType().isDestructedType()) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitCompoundLiteralLValue: non C++ DestructedType");
    return {};
  }

  return result;
}
2073
  // Emit the call, then wrap its result as an lvalue: aggregate results are
  // already in memory; a scalar result must be a reference (a pointer) for
  // the call to be usable as an lvalue.
  RValue rv = emitCallExpr(e);

  if (!rv.isScalar())
    return makeAddrLValue(rv.getAggregateAddress(), e->getType(),

  assert(e->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

}
2087
  // Emit a binary operator used in lvalue position. Only comma,
  // pointer-to-member, and assignment can appear here.
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (e->getOpcode() == BO_Comma) {
    emitIgnoredExpr(e->getLHS());
    return emitLValue(e->getRHS());
  }

  if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI)

  assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  case cir::TEK_Scalar: {
    if (e->getLHS()->getType().getObjCLifetime() !=
      cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
      return {};
    }

    // RHS before LHS (see the __block note above).
    RValue rv = emitAnyExpr(e->getRHS());
    LValue lv = emitLValue(e->getLHS());

    SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
    if (lv.isBitField())
    else
      emitStoreThroughLValue(rv, lv);

    if (getLangOpts().OpenMP) {
      cgm.errorNYI(e->getSourceRange(), "openmp");
      return {};
    }

    return lv;
  }

  case cir::TEK_Complex: {
  }

  case cir::TEK_Aggregate:
    cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
    return {};
  }
  llvm_unreachable("bad evaluation kind");
}
2139
/// Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// \p aggSlot supplies memory for an aggregate result; if it is ignored and
/// the result is needed, a temporary slot is created. \p ignoreResult lets
/// scalar/aggregate emission skip producing a value.
                                    bool ignoreResult) {
  case cir::TEK_Scalar:
    return RValue::get(emitScalarExpr(e, ignoreResult));
  case cir::TEK_Complex:
  case cir::TEK_Aggregate: {
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
    emitAggExpr(e, aggSlot);
    return aggSlot.asRValue();
  }
  }
  llvm_unreachable("bad evaluation kind");
}
2159
// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
// Walks the whole redeclaration chain; returns true only if every
// declaration of the function is an inline builtin declaration.
  for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
    if (!pd->isInlineBuiltinDeclaration())
      return false;
  return true;
}
2169
/// Build the callee for a direct call to \p gd. Handles builtins (including
/// inline builtin implementations, which are called through a ".inline"
/// clone) and CUDA kernel stubs; everything else resolves to the function's
/// declared cir.func.
CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
  const auto *fd = cast<FunctionDecl>(gd.getDecl());

  if (unsigned builtinID = fd->getBuiltinID()) {
    StringRef ident = cgm.getMangledName(gd);
    std::string fdInlineName = (ident + ".inline").str();

    bool isPredefinedLibFunction =
        cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
    // TODO: Read no-builtin function attribute and set this accordingly.
    // Using false here matches OGCG's default behavior - builtins are called
    // as builtins unless explicitly disabled. The previous value of true was
    // overly conservative and caused functions to be marked as no_inline when
    // they shouldn't be.
    bool hasAttributeNoBuiltin = false;

    // When directly calling an inline builtin, call it through its mangled
    // name to make it clear it's not the actual builtin.
    auto fn = cast<cir::FuncOp>(curFn);
    if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
      cir::FuncOp clone =
          mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));

      if (!clone) {
        // Create a forward declaration - the body will be generated in
        // generateCode when the function definition is processed
        cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
        mlir::OpBuilder::InsertionGuard guard(builder);
        builder.setInsertionPointToStart(cgm.getModule().getBody());

        clone = cir::FuncOp::create(builder, calleeFunc.getLoc(), fdInlineName,
                                    calleeFunc.getFunctionType());
        cgm.insertGlobalSymbol(clone);
        // The clone is an internal implementation detail: internal linkage,
        // private visibility, and always-inline so it disappears after use.
        clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
            &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
        clone.setSymVisibility("private");
        clone.setInlineKind(cir::InlineKind::AlwaysInline);
      }
      return CIRGenCallee::forDirect(clone, gd);
    }

    // Replaceable builtins provide their own implementation of a builtin. If we
    // are in an inline builtin implementation, avoid trivial infinite
    // recursion. Honor __attribute__((no_builtin("foo"))) or
    // __attribute__((no_builtin)) on the current function unless foo is
    // not a predefined library function which means we must generate the
    // builtin no matter what.
    else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
      return CIRGenCallee::forBuiltin(builtinID, fd);
  }

  cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);

  // Host-side calls to CUDA/HIP kernels go through the kernel stub, not the
  // device function itself.
  if ((cgm.getLangOpts().CUDA || cgm.getLangOpts().HIP) &&
      !cgm.getLangOpts().CUDAIsDevice && fd->hasAttr<CUDAGlobalAttr>()) {
    mlir::Operation *handle = cgm.getCUDARuntime().getKernelHandle(callee, gd);
    callee =
        mlir::cast<cir::FuncOp>(*cgm.getCUDARuntime().getKernelStub(handle));
  }

  return CIRGenCallee::forDirect(callee, gd);
}
2233
  // Produce a placeholder RValue for a value of type \p ty. Only void is
  // currently supported; other types report NYI but still return a null
  // scalar so emission can continue.
  if (ty->isVoidType())
    return RValue::get(nullptr);

  cgm.errorNYI("unsupported type for undef rvalue");
  return RValue::get(nullptr);
}
2241
// Emit a call through \p origCallee with callee type \p calleeTy, collecting
// the arguments of \p e and arranging the call per the target ABI. Handles
// the C99 unprototyped-call case by casting the callee to the promoted
// argument types.
                                 const CIRGenCallee &origCallee,
                                 const clang::CallExpr *e,
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(calleeTy->isFunctionPointerType() &&
         "Callee must have function pointer type!");

  calleeTy = getContext().getCanonicalType(calleeTy);
  auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();

  CIRGenCallee callee = origCallee;

  if (getLangOpts().CPlusPlus)

  const auto *fnType = cast<FunctionType>(pointeeTy);

  CallArgList args;

  emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
               e->getDirectCallee());

  const CIRGenFunctionInfo &funcInfo =
      cgm.getTypes().arrangeFreeFunctionCall(args, fnType);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type that does
  //   not include a prototype, [the default argument promotions are performed].
  //   If the number of arguments does not equal the number of parameters, the
  //   behavior is undefined. If the function is defined with a type that
  //   includes a prototype, and either the prototype ends with an ellipsis (,
  //   ...) or the types of the arguments after promotion are not compatible
  //   with the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a prototype, and
  //   the types of the arguments after promotion are not compatible with those
  //   of the parameters after promotion, the behavior is undefined [except in
  //   some trivial cases].
  // That is, in the general case, we should assume that a call through an
  // unprototyped function type works like a *non-variadic* call. The way we
  // make this work is to cast to the exact type of the promoted arguments.
  if (isa<FunctionNoProtoType>(fnType)) {
    cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
    // get non-variadic function type
    calleeTy = cir::FuncType::get(calleeTy.getInputs(),
                                  calleeTy.getReturnType(), false);
    auto calleePtrTy = cir::PointerType::get(calleeTy);

    mlir::Operation *fn = callee.getFunctionPointer();
    mlir::Value addr;
    if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
      // Direct callees are symbols; take their address explicitly so the
      // bitcast below has an SSA value to work on.
      addr = cir::GetGlobalOp::create(
          builder, getLoc(e->getSourceRange()),
          cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
    } else {
      addr = fn->getResult(0);
    }

    fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
    callee.setFunctionPointer(fn);
  }

  assert(!cir::MissingFeatures::hip());

  cir::CIRCallOpInterface callOp;
  RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
                               getLoc(e->getExprLoc()));

  return callResult;
}
2322
  // Resolve the callee of a call expression: strip parens and decay casts,
  // resolve direct callees (DeclRefExpr / MemberExpr to a FunctionDecl), and
  // otherwise fall back to an indirect function-pointer callee.
  e = e->IgnoreParens();

  // Look through function-to-pointer decay.
  if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
    if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
        implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
      return emitCallee(implicitCast->getSubExpr());
    }
    // When performing an indirect call through a function pointer lvalue, the
    // function pointer lvalue is implicitly converted to an rvalue through an
    // lvalue-to-rvalue conversion.
    assert(implicitCast->getCastKind() == CK_LValueToRValue &&
           "unexpected implicit cast on function pointers");
  } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
    // Resolve direct calls.
    const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
    return emitDirectCallee(funcDecl);
  } else if (auto me = dyn_cast<MemberExpr>(e)) {
    if (const auto *fd = dyn_cast<FunctionDecl>(me->getMemberDecl())) {
      // The base is evaluated only for side effects; the member itself
      // names a concrete function.
      emitIgnoredExpr(me->getBase());
      return emitDirectCallee(fd);
    }
    // Else fall through to the indirect reference handling below.
  } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
  }

  // Otherwise, we have an indirect reference.
  mlir::Value calleePtr;
  if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
    calleePtr = emitScalarExpr(e);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = e->getType();
    calleePtr = emitLValue(e).getPointer();
  }
  assert(functionType->isFunctionType());

  // Keep a GlobalDecl for variables referenced as callees so later stages
  // can attach declaration-specific information.
  GlobalDecl gd;
  if (const auto *vd =
          dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
    gd = GlobalDecl(vd);

  CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
  CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
  return callee;
}
2372
// Emit a call expression, dispatching C++ member calls, CUDA kernel calls,
// and operator member calls to their dedicated emitters before handling the
// generic (builtin / pseudo-destructor / ordinary) callee cases.

  if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))

  if (const auto *cudaKernelCallExpr = dyn_cast<CUDAKernelCallExpr>(e))

  if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
    // If the callee decl is a CXXMethodDecl, we need to emit this as a C++
    // operator member call.
    if (const CXXMethodDecl *md =
            dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
      return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
    // A CXXOperatorCallExpr is created even for explicit object methods, but
    // these should be treated like static function calls. Fall through to do
    // that.
  }

  CIRGenCallee callee = emitCallee(e->getCallee());

  if (callee.isBuiltin())
    return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
                           returnValue);

  if (callee.isPseudoDestructor())

  return emitCall(e->getCallee()->getType(), callee, e, returnValue);
}
2405
/// Emit code to compute the specified expression, ignoring the result.
/// PRValues are emitted into an ignored slot; glvalues are emitted as
/// lvalues (preserving side effects) and the address is dropped.
  if (e->isPRValue()) {
    emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
    return;
  }

  // Just emit it as an l-value and drop the result.
  emitLValue(e);
}
2416
// Emit an array-to-pointer decay: take the lvalue of the array expression
// and produce the address of its first element, asserting that the decayed
// CIR type matches the AST array type.
                                              LValueBaseInfo *baseInfo) {
  assert(e->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue lv = emitLValue(e);
  Address addr = lv.getAddress();

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());

  // VLAs already carry an element pointer; no decay op is needed.
  if (e->getType()->isVariableArrayType())
    return addr;

  [[maybe_unused]] auto pointeeTy =
      mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());

  [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
  assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
  assert(pointeeTy == arrayTy);

  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent accesses
  // to the pointee object as if it had no base lvalue specified.
  // TODO: Support TBAA for member arrays.

  mlir::Value ptr = builder.maybeBuildArrayDecay(
      cgm.getLoc(e->getSourceRange()), addr.getPointer(),
      convertTypeForMem(eltType));
  return Address(ptr, addr.getAlignment());
}
2454
/// Given the address of a temporary variable, produce an r-value of its type.
/// Loads through the lvalue according to the type's CIR evaluation kind.
  switch (getEvaluationKind(type)) {
  case cir::TEK_Complex:
    return RValue::getComplex(emitLoadOfComplex(lvalue, loc));
  case cir::TEK_Aggregate:
    // Aggregates stay in memory; just wrap the address.
    return lvalue.asAggregateRValue();
  case cir::TEK_Scalar:
    return RValue::get(emitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}
2469
/// Emit an `if` on a boolean condition, filling `then` and `else` into
/// appropriated regions.
/// Statement-level overload: wraps each branch statement in its own
/// LexicalScope and reports success only if both branches emitted cleanly.
mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
                                                     const Stmt *thenS,
                                                     const Stmt *elseS) {
  mlir::Location thenLoc = getLoc(thenS->getSourceRange());
  std::optional<mlir::Location> elseLoc;
  if (elseS)
    elseLoc = getLoc(elseS->getSourceRange());

  mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
      cond, /*thenBuilder=*/
      [&](mlir::OpBuilder &, mlir::Location) {
        LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
        resThen = emitStmt(thenS, /*useCurrentScope=*/true);
      },
      thenLoc,
      /*elseBuilder=*/
      [&](mlir::OpBuilder &, mlir::Location) {
        assert(elseLoc && "Invalid location for elseS.");
        LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
        resElse = emitStmt(elseS, /*useCurrentScope=*/true);
      },
      elseLoc);

  return mlir::LogicalResult::success(resThen.succeeded() &&
                                      resElse.succeeded());
}
2499
/// Emit an `if` on a boolean condition, filling `then` and `else` into
/// appropriated regions.
/// Builder-callback overload: evaluates \p cond, creates the cir.if with the
/// provided region builders, and terminates each region.
    const clang::Expr *cond, BuilderCallbackRef thenBuilder,
    mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
    std::optional<mlir::Location> elseLoc) {
  // Attempt to be as accurate as possible with IfOp location, generate
  // one fused location that has either 2 or 4 total locations, depending
  // on else's availability.
  SmallVector<mlir::Location, 2> ifLocs{thenLoc};
  if (elseLoc)
    ifLocs.push_back(*elseLoc);
  mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);

  // Emit the code with the fully general case.
  mlir::Value condV = emitOpOnBoolExpr(loc, cond);
  cir::IfOp ifOp = cir::IfOp::create(builder, loc, condV, elseLoc.has_value(),
                                     /*thenBuilder=*/thenBuilder,
                                     /*elseBuilder=*/elseBuilder);
  terminateStructuredRegionBody(ifOp.getThenRegion(), thenLoc);
  assert((elseLoc.has_value() || ifOp.getElseRegion().empty()) &&
         "else region created with no else location");
  if (elseLoc.has_value())
    terminateStructuredRegionBody(ifOp.getElseRegion(), *elseLoc);
  return ifOp;
}
2526
/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
/// Evaluate \p cond as an i1/bool value. Conditional operators are lowered
/// to cir.ternary (recursing on the condition); everything else goes
/// through evaluateExprAsBool.
mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
                                             const Expr *cond) {
  cond = cond->IgnoreParens();

  // In LLVM the condition is reversed here for efficient codegen.
  // This should be done in CIR prior to LLVM lowering, if we do now
  // we can make CIR based diagnostics misleading.
  // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)

  if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
    Expr *trueExpr = condOp->getTrueExpr();
    Expr *falseExpr = condOp->getFalseExpr();
    mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());

    // Each arm is emitted inside its own ternary region and yielded.
    mlir::Value ternaryOpRes =
        cir::TernaryOp::create(
            builder, loc, condV, /*thenBuilder=*/
            [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
              mlir::Value lhs = emitScalarExpr(trueExpr);
              cir::YieldOp::create(b, loc, lhs);
            },
            /*elseBuilder=*/
            [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
              mlir::Value rhs = emitScalarExpr(falseExpr);
              cir::YieldOp::create(b, loc, rhs);
            })
            .getResult();

    // The ternary yields the operand type; convert it to bool.
    return emitScalarConversion(ternaryOpRes, condOp->getType(),
                                getContext().BoolTy, condOp->getExprLoc());
  }

  if (isa<CXXThrowExpr>(cond)) {
    cgm.errorNYI("NYI");
    return createDummyValue(loc, cond->getType());
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.

  // Emit the code with the fully general case.
  return evaluateExprAsBool(cond);
}
2576
/// Emit an alloca for \p ty, choosing the entry block of either the function
/// or the current lexical scope, then delegating to the insert-point
/// overload. Allocas created inside a cir.try's entry block are hoisted to
/// the surrounding scope (see below).
mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
                                       mlir::Location loc, CharUnits alignment,
                                       bool insertIntoFnEntryBlock,
                                       mlir::Value arraySize) {
  mlir::Block *entryBlock = insertIntoFnEntryBlock
                                : curLexScope->getEntryBlock();

  // If this is an alloca in the entry basic block of a cir.try and there's
  // a surrounding cir.scope, make sure the alloca ends up in the surrounding
  // scope instead. This is necessary in order to guarantee all SSA values are
  // reachable during cleanups.
  if (auto tryOp =
          llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
    if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
      entryBlock = &scopeOp.getScopeRegion().front();
  }

  return emitAlloca(name, ty, loc, alignment,
                    builder.getBestAllocaInsertPoint(entryBlock), arraySize);
}
2598
/// Emit a cir.alloca for \p ty at the given insert point \p ip, returning the
/// resulting pointer value. The builder's current insertion point is
/// preserved via an InsertionGuard.
mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
                                       mlir::Location loc, CharUnits alignment,
                                       mlir::OpBuilder::InsertPoint ip,
                                       mlir::Value arraySize) {
  // CIR uses its own alloca address space rather than follow the target data
  // layout like original CodeGen. The data layout awareness should be done in
  // the lowering pass instead.
  cir::PointerType localVarPtrTy =
  mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);

  mlir::Value addr;
  {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.restoreInsertionPoint(ip);
    addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
                                /*var type*/ ty, name, alignIntAttr, arraySize);
  }
  return addr;
}
2620
// Note: this function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
  const Expr *callee = ce->getCallee()->IgnoreParens();

  // Pointer-to-member calls (BinaryOperator callees) are handled elsewhere.
  if (isa<BinaryOperator>(callee))

  const auto *me = cast<MemberExpr>(callee);
  const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());

  if (md->isStatic()) {
    cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
    return RValue::get(nullptr);
  }

  // Capture the pieces of the member access needed to build the member call:
  // qualifier (e.g. Base::f), arrow vs. dot, and the object expression.
  bool hasQualifier = me->hasQualifier();
  NestedNameSpecifier qualifier = me->getQualifier();
  bool isArrow = me->isArrow();
  const Expr *base = me->getBase();

      ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
}
2646
  // Emit the expression as an lvalue.
  // NOTE(review): the enclosing signature is not visible here; this returns
  // the lvalue's pointer wrapped as a scalar RValue — presumably a
  // reference-binding/address-of helper. Confirm against the repository.
  LValue lv = emitLValue(e);
  assert(lv.isSimple());
  mlir::Value value = lv.getPointer();

  return RValue::get(value);
}
2657
// Load the pointee address out of a reference lvalue \p refLVal. The
// returned Address carries the pointee's natural alignment; \p
// pointeeBaseInfo (if non-null) receives the pointee's base info.
                                          LValueBaseInfo *pointeeBaseInfo) {
  if (refLVal.isVolatile())
    cgm.errorNYI(loc, "load of volatile reference");

  cir::LoadOp load =
      cir::LoadOp::create(builder, loc, refLVal.getAddress().getElementType(),
                          refLVal.getAddress().getPointer());

  QualType pointeeType = refLVal.getType()->getPointeeType();
  CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
  return Address(load, convertTypeForMem(pointeeType), align);
}
2673
// Build an lvalue for the object a reference refers to: wrap the reference
// storage as an lvalue, load the pointee address, and re-wrap it as an
// lvalue of the pointee type.
                                                  mlir::Location loc,
                                                  QualType refTy,
                                                  AlignmentSource source) {
  LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
  LValueBaseInfo pointeeBaseInfo;
  Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
  return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
                        pointeeBaseInfo);
}
2685
2686void CIRGenFunction::emitTrap(mlir::Location loc, bool createNewBlock) {
2687 cir::TrapOp::create(builder, loc);
2688 if (createNewBlock)
2689 builder.createBlock(builder.getBlock()->getParent());
2690}
2691
// Emit a cir.unreachable at \p loc; optionally start a fresh block so any
// code emitted afterwards has an insertion target.
                                     bool createNewBlock) {
  cir::UnreachableOp::create(builder, getLoc(loc));
  if (createNewBlock)
    builder.createBlock(builder.getBlock()->getParent());
}
2699
2700mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
2701 clang::QualType qt) {
2702 mlir::Type t = convertType(qt);
2703 CharUnits alignment = getContext().getTypeAlignInChars(qt);
2704 return builder.createDummyValue(loc, t, alignment);
2705}
2706
2707//===----------------------------------------------------------------------===//
2708// CIR builder helpers
2709//===----------------------------------------------------------------------===//
2710
// Convenience overload: create a memory temporary for \p ty using the AST
// type's natural alignment, forwarding to the alignment-taking overload.
                                 const Twine &name, Address *alloca,
                                 mlir::OpBuilder::InsertPoint ip) {
  // FIXME: Should we prefer the preferred type alignment here?
  return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
                       alloca, ip);
}
2718
// Create a memory temporary for \p ty with explicit alignment \p align.
// \p alloca (if non-null) receives the raw alloca address; \p ip optionally
// overrides the insertion point. Matrix temporaries are NYI.
                                 mlir::Location loc, const Twine &name,
                                 Address *alloca,
                                 mlir::OpBuilder::InsertPoint ip) {
  Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
                                    /*ArraySize=*/nullptr, alloca, ip);
  if (ty->isConstantMatrixType()) {
    cgm.errorNYI(loc, "temporary matrix value");
  }
  return result;
}
2731
/// This creates an alloca and inserts it into the entry block of the
/// current region, without the default-address-space cast that
/// createTempAlloca applies.
    mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
    mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
  // Use the explicit insertion point when provided; otherwise place the
  // alloca in the entry block.
  cir::AllocaOp alloca = ip.isSet()
                             ? createTempAlloca(ty, loc, name, ip, arraySize)
                             : createTempAlloca(ty, loc, name, arraySize);
  alloca.setAlignmentAttr(cgm.getSize(align));
  return Address(alloca, ty, align);
}
2743
/// This creates an alloca and inserts it into the entry block. The alloca is
/// casted to default address space if necessary.
// TODO(cir): Implement address space casting to match classic codegen's
// CreateTempAlloca behavior with DestLangAS parameter
                                       mlir::Location loc, const Twine &name,
                                       mlir::Value arraySize,
                                       Address *allocaAddr,
                                       mlir::OpBuilder::InsertPoint ip) {
  Address alloca =
      createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
  // Report the pre-cast alloca address to the caller if requested.
  if (allocaAddr)
    *allocaAddr = alloca;
  mlir::Value v = alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.

  cir::PointerType dstTy;
    dstTy = builder.getPointerTo(ty, getCIRAllocaAddressSpace());
  else
    dstTy = builder.getPointerTo(ty, clang::LangAS::Default);
  v = performAddrSpaceCast(v, dstTy);

  return Address(v, ty, align);
}
2772
2773/// This creates an alloca and inserts it into the entry block if \p ArraySize
2774/// is nullptr, otherwise inserts it at the current insertion point of the
2775/// builder.
2776cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2777 mlir::Location loc,
2778 const Twine &name,
2779 mlir::Value arraySize,
2780 bool insertIntoFnEntryBlock) {
2781 return mlir::cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
2782 insertIntoFnEntryBlock, arraySize)
2783 .getDefiningOp());
2784}
2785
2786/// This creates an alloca and inserts it into the provided insertion point
2787cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2788 mlir::Location loc,
2789 const Twine &name,
2790 mlir::OpBuilder::InsertPoint ip,
2791 mlir::Value arraySize) {
2792 assert(ip.isSet() && "Insertion point is not set");
2793 return mlir::cast<cir::AllocaOp>(
2794 emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
2795 .getDefiningOp());
2796}
2797
/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
/// The alignment comes from the data layout's ABI alignment for \p ty.
                                                 mlir::Location loc,
                                                 const Twine &name) {
  CharUnits align =
      CharUnits::fromQuantity(cgm.getDataLayout().getABITypeAlign(ty));
  return createTempAlloca(ty, align, loc, name);
}
2809
/// Try to emit a reference to the given value without producing it as
/// an l-value. For many cases, this is just an optimization, but it avoids
/// us needing to emit global copies of variables if they're named without
/// triggering a formal use in a context where we can't emit a direct
/// reference to them, for instance if a block or lambda or a member of a
/// local class uses a const int variable or constexpr variable from an
/// enclosing function.
///
/// For named members of enums, this is the only way they are emitted.
/// Returns a default-constructed ConstantEmission when the expression
/// cannot (yet) be emitted as a constant.
  const ValueDecl *value = refExpr->getDecl();

  // There is a lot more to do here, but for now only EnumConstantDecl is
  // supported.

  // The value needs to be an enum constant or a constant variable.
  if (!isa<EnumConstantDecl>(value))
    return ConstantEmission();

  Expr::EvalResult result;
  if (!refExpr->EvaluateAsRValue(result, getContext()))
    return ConstantEmission();

  QualType resultType = refExpr->getType();

  // As long as we're only handling EnumConstantDecl, there should be no
  // side-effects.
  assert(!result.HasSideEffects);

  // Emit as a constant.
  // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
  // somewhat heavy refactoring...)
  mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
      refExpr->getLocation(), result.Val, resultType);
  mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
  assert(cstToEmit && "expected a typed attribute");

  return ConstantEmission::forValue(cstToEmit);
}
2853
2857 return tryEmitAsConstant(dre);
2858 return ConstantEmission();
2859}
2860
2862 const CIRGenFunction::ConstantEmission &constant, Expr *e) {
2863 assert(constant && "not a constant");
2864 if (constant.isReference()) {
2865 cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
2866 return {};
2867 }
2868 return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
2869}
2870
2872 const StringLiteral *sl = e->getFunctionName();
2873 assert(sl != nullptr && "No StringLiteral name in PredefinedExpr");
2874 auto fn = cast<cir::FuncOp>(curFn);
2875 StringRef fnName = fn.getName();
2876 fnName.consume_front("\01");
2877 std::array<StringRef, 2> nameItems = {
2879 std::string gvName = llvm::join(nameItems, ".");
2880 if (isa_and_nonnull<BlockDecl>(curCodeDecl))
2881 cgm.errorNYI(e->getSourceRange(), "predefined lvalue in block");
2882
2883 return emitStringLiteralLValue(sl, gvName);
2884}
2885
2890
2891namespace {
2892// Handle the case where the condition is a constant evaluatable simple integer,
2893// which means we don't have to separately handle the true/false blocks.
2894std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
2896 const Expr *condExpr = e->getCond();
2897 llvm::APSInt condExprVal;
2898 if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
2899 return std::nullopt;
2900
2901 const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
2902 if (!condExprVal.getBoolValue())
2903 std::swap(live, dead);
2904
2905 if (cgf.containsLabel(dead))
2906 return std::nullopt;
2907
2908 // If the true case is live, we need to track its region.
2911 // If a throw expression we emit it and return an undefined lvalue
2912 // because it can't be used.
2913 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
2914 cgf.emitCXXThrowExpr(throwExpr);
2915 // Return an undefined lvalue - the throw terminates execution
2916 // so this value will never actually be used
2917 mlir::Type elemTy = cgf.convertType(dead->getType());
2918 mlir::Value undefPtr =
2919 cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
2920 cgf.getLoc(throwExpr->getSourceRange()));
2921 return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
2922 dead->getType());
2923 }
2924 return cgf.emitLValue(live);
2925}
2926
2927/// Emit the operand of a glvalue conditional operator. This is either a glvalue
2928/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
2929/// LValue is returned and the current block has been terminated.
2930static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
2931 const Expr *operand) {
2932 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
2933 cgf.emitCXXThrowExpr(throwExpr);
2934 return std::nullopt;
2935 }
2936
2937 return cgf.emitLValue(operand);
2938}
2939} // namespace
2940
2941// Create and generate the 3 blocks for a conditional operator.
2942// Leaves the 'current block' in the continuation basic block.
2943template <typename FuncTy>
2946 const FuncTy &branchGenFunc) {
2947 ConditionalInfo info;
2948 ConditionalEvaluation eval(*this);
2949 mlir::Location loc = getLoc(e->getSourceRange());
2950 CIRGenBuilderTy &builder = getBuilder();
2951
2952 mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
2954 mlir::Type yieldTy{};
2955
2956 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
2957 const Expr *expr, std::optional<LValue> &resultLV) {
2958 CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
2959 curLexScope->setAsTernary();
2960
2962 eval.beginEvaluation();
2963 resultLV = branchGenFunc(*this, expr);
2964 mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
2965 eval.endEvaluation();
2966
2967 if (resultPtr) {
2968 yieldTy = resultPtr.getType();
2969 cir::YieldOp::create(b, loc, resultPtr);
2970 } else {
2971 // If LHS or RHS is a void expression we need
2972 // to patch arms as to properly match yield types.
2973 // If the current block's terminator is an UnreachableOp (from a throw),
2974 // we don't need a yield
2975 if (builder.getInsertionBlock()->mightHaveTerminator()) {
2976 mlir::Operation *terminator =
2977 builder.getInsertionBlock()->getTerminator();
2978 if (isa_and_nonnull<cir::UnreachableOp>(terminator))
2979 insertPoints.push_back(b.saveInsertionPoint());
2980 }
2981 }
2982 };
2983
2984 info.result = cir::TernaryOp::create(
2985 builder, loc, condV,
2986 /*trueBuilder=*/
2987 [&](mlir::OpBuilder &b, mlir::Location loc) {
2988 emitBranch(b, loc, e->getTrueExpr(), info.lhs);
2989 },
2990 /*falseBuilder=*/
2991 [&](mlir::OpBuilder &b, mlir::Location loc) {
2992 emitBranch(b, loc, e->getFalseExpr(), info.rhs);
2993 })
2994 .getResult();
2995
2996 // If both arms are void, so be it.
2997 if (!yieldTy)
2998 yieldTy = voidTy;
2999
3000 // Insert required yields.
3001 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
3002 mlir::OpBuilder::InsertionGuard guard(builder);
3003 builder.restoreInsertionPoint(toInsert);
3004
3005 // Block does not return: build empty yield.
3006 if (!yieldTy) {
3007 cir::YieldOp::create(builder, loc);
3008 } else { // Block returns: set null yield value.
3009 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
3010 cir::YieldOp::create(builder, loc, op0);
3011 }
3012 }
3013
3014 return info;
3015}
3016
3019 if (!expr->isGLValue()) {
3020 // ?: here should be an aggregate.
3021 assert(hasAggregateEvaluationKind(expr->getType()) &&
3022 "Unexpected conditional operator!");
3023 return emitAggExprToLValue(expr);
3024 }
3025
3026 OpaqueValueMapping binding(*this, expr);
3027 if (std::optional<LValue> res =
3028 handleConditionalOperatorLValueSimpleCase(*this, expr))
3029 return *res;
3030
3031 ConditionalInfo info =
3032 emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
3033 return emitLValueOrThrowExpression(cgf, e);
3034 });
3035
3036 if ((info.lhs && !info.lhs->isSimple()) ||
3037 (info.rhs && !info.rhs->isSimple())) {
3038 cgm.errorNYI(expr->getSourceRange(),
3039 "unsupported conditional operator with non-simple lvalue");
3040 return LValue();
3041 }
3042
3043 if (info.lhs && info.rhs) {
3044 Address lhsAddr = info.lhs->getAddress();
3045 Address rhsAddr = info.rhs->getAddress();
3046 Address result(info.result, lhsAddr.getElementType(),
3047 std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
3048 AlignmentSource alignSource =
3049 std::max(info.lhs->getBaseInfo().getAlignmentSource(),
3050 info.rhs->getBaseInfo().getAlignmentSource());
3052 return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
3053 }
3054
3055 assert((info.lhs || info.rhs) &&
3056 "both operands of glvalue conditional are throw-expressions?");
3057 return info.lhs ? *info.lhs : *info.rhs;
3058}
3059
3060/// An LValue is a candidate for having its loads and stores be made atomic if
3061/// we are operating under /volatile:ms *and* the LValue itself is volatile and
3062/// performing such an operation can be performed without a libcall.
3064 if (!cgm.getLangOpts().MSVolatile)
3065 return false;
3066
3067 cgm.errorNYI("LValueSuitableForInlineAtomic LangOpts MSVolatile");
3068 return false;
3069}
3070
#define V(N, I)
Provides definitions for the various language-specific address spaces.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e, GlobalDecl gd)
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static cir::IntAttr getConstantIndexOrNull(mlir::Value idx)
static const Expr * getSimpleArrayDecayOperand(const Expr *e)
If the specified expr is a simple decay from an array to pointer, return the array subexpression.
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd)
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e, const VarDecl *vd)
static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf, mlir::Location beginLoc, mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd, mlir::Value thisValue)
static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd)
static Address emitAddrOfZeroSizeField(CIRGenFunction &cgf, Address base, const FieldDecl *field)
Get the address of a zero-sized field within a record.
Defines the clang::Expr interface and subclasses for C++ expressions.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__device__ __2f16 b
__device__ __2f16 float c
cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr)
cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, mlir::Value stride)
static OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block)
cir::GetMemberOp createGetMember(mlir::Location loc, mlir::Type resultTy, mlir::Value base, llvm::StringRef name, unsigned index)
cir::PointerType getPointerTo(mlir::Type ty)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createPtrBitcast(mlir::Value src, mlir::Type newPointeeTy)
cir::GetGlobalOp createGetGlobal(mlir::Location loc, cir::GlobalOp global, bool threadLocal=false)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:227
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType BoolTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2779
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2753
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:2767
SourceLocation getEndLoc() const
Definition Expr.h:2770
QualType getElementType() const
Definition TypeBase.h:3789
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
Expr * getRHS() const
Definition Expr.h:4093
Opcode getOpcode() const
Definition Expr.h:4086
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
static Address invalid()
Definition Address.h:74
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:136
mlir::Type getType() const
Definition Address.h:115
bool isValid() const
Definition Address.h:75
mlir::Operation * getDefiningOp() const
Get the operation which defines this address.
Definition Address.h:139
An aggregate value slot.
IsDestructed_t
This is set to true if the slot might be aliased and it's not undefined behavior to access it through...
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
cir::ConstantOp getUInt64(uint64_t c, mlir::Location loc)
Address createElementBitCast(mlir::Location loc, Address addr, mlir::Type destType)
Cast the element type of the given address to a different type, preserving information like the align...
mlir::Value getArrayElement(mlir::Location arrayLocBegin, mlir::Location arrayLocEnd, mlir::Value arrayPtr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
Create a cir.ptr_stride operation to get access to an array element.
Abstract information about a function or function prototype.
Definition CIRGenCall.h:27
bool isPseudoDestructor() const
Definition CIRGenCall.h:123
void setFunctionPointer(mlir::Operation *functionPtr)
Definition CIRGenCall.h:185
const clang::FunctionDecl * getBuiltinDecl() const
Definition CIRGenCall.h:99
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CIRGenCall.h:127
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
unsigned getBuiltinID() const
Definition CIRGenCall.h:103
static CIRGenCallee forBuiltin(unsigned builtinID, const clang::FunctionDecl *builtinDecl)
Definition CIRGenCall.h:108
mlir::Operation * getFunctionPointer() const
Definition CIRGenCall.h:147
static CIRGenCallee forPseudoDestructor(const clang::CXXPseudoDestructorExpr *expr)
Definition CIRGenCall.h:117
An object to manage conditionally-evaluated expressions.
static ConstantEmission forValue(mlir::TypedAttr c)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not point to a complete object, construct an l-value with the natural
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitMemberExpr(const MemberExpr *e)
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
LValue emitLValueForLambdaField(const FieldDecl *field)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
void emitStoreThroughExtVectorComponentLValue(RValue src, LValue dst)
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
mlir::Operation * curFn
The current function or global initializer that is generated code for.
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
RValue emitAtomicLoad(LValue lvalue, SourceLocation loc, AggValueSlot slot=AggValueSlot::ignored())
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::Type convertTypeForMem(QualType t)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
static bool hasAggregateEvaluationKind(clang::QualType type)
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
LValue emitCallExprLValue(const clang::CallExpr *e)
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
mlir::MLIRContext & getMLIRContext()
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
void emitCXXThrowExpr(const CXXThrowExpr *e)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
void terminateStructuredRegionBody(mlir::Region &r, mlir::Location loc)
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates an alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *vd)
mlir::IntegerAttr getSize(CharUnits size)
CIRGenBuilderTy & getBuilder()
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
mlir::Operation * getAddrOfGlobalTemporary(const MaterializeTemporaryExpr *mte, const Expr *init)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
mlir::Value getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty={}, ForDefinition_t isForDefinition=NotForDefinition)
Return the mlir::Value for the address of the given global variable.
This class handles record and union layout info while lowering AST types to CIR types.
cir::RecordType getCIRType() const
Return the "complete object" LLVM type associated with this record.
const CIRGenBitFieldInfo & getBitFieldInfo(const clang::FieldDecl *fd) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getCIRFieldNo(const clang::FieldDecl *fd) const
Return cir::RecordType element number that corresponds to the field FD.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
mlir::Type convertTypeForMem(clang::QualType, bool forBitField=false)
Convert type T into an mlir::Type.
mlir::Attribute emitAbstract(const Expr *e, QualType destType)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
AlignmentSource getAlignmentSource() const
void mergeForCast(const LValueBaseInfo &info)
bool isExtVectorElt() const
mlir::Value getVectorPointer() const
const clang::Qualifiers & getQuals() const
mlir::Value getExtVectorPointer() const
bool isMatrixRow() const
static LValue makeExtVectorElt(Address vecAddress, mlir::ArrayAttr elts, clang::QualType type, LValueBaseInfo baseInfo)
mlir::Value getVectorIdx() const
bool isVectorElt() const
Address getAddress() const
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
mlir::ArrayAttr getExtVectorElts() const
static LValue makeVectorElt(Address vecAddress, mlir::Value index, clang::QualType t, LValueBaseInfo baseInfo)
RValue asAggregateRValue() const
unsigned getVRQualifiers() const
clang::QualType getType() const
static LValue makeBitfield(Address addr, const CIRGenBitFieldInfo &info, clang::QualType type, LValueBaseInfo baseInfo)
Create a new object to represent a bit-field access.
mlir::Value getPointer() const
bool isVolatileQualified() const
bool isBitField() const
Address getVectorAddress() const
clang::CharUnits getAlignment() const
LValueBaseInfo getBaseInfo() const
bool isVolatile() const
const CIRGenBitFieldInfo & getBitFieldInfo() const
Address getBitFieldAddress() const
Address getExtVectorAddress() const
bool isSimple() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Address getAggregateAddress() const
Return the value of the address of the aggregate.
Definition CIRGenValue.h:69
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a C++ destructor within a class.
Definition DeclCXX.h:2882
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:183
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2132
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1372
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:852
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
arg_range arguments()
Definition Expr.h:3198
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3766
bool changesVolatileQualification() const
Return.
Definition Expr.h:3813
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1951
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3330
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
bool isFileScope() const
Definition Expr.h:3640
const Expr * getInitializer() const
Definition Expr.h:3636
ConditionalOperator - The ?
Definition Expr.h:4394
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1477
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:488
ValueDecl * getDecl()
Definition Expr.h:1341
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1471
SourceLocation getLocation() const
Definition Expr.h:1349
SourceLocation getLocation() const
Definition DeclBase.h:447
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition DeclBase.cpp:576
DeclContext * getDeclContext()
Definition DeclBase.h:456
bool hasAttr() const
Definition DeclBase.h:585
virtual SourceRange getSourceRange() const LLVM_READONLY
Source range that this declaration covers.
Definition DeclBase.h:435
const Expr * getBase() const
Definition Expr.h:6582
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:84
bool isGLValue() const
Definition Expr.h:287
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:447
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1546
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6611
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4436
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4549
Represents a member of a struct/union/class.
Definition Decl.h:3178
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3281
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4821
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3263
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3414
bool isPotentiallyOverlapping() const
Determine if this field is of potentially-overlapping class type, that is, subobject with the [[no_un...
Definition Decl.cpp:4799
Represents a function declaration or definition.
Definition Decl.h:2018
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5362
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4920
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4945
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4970
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3450
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3591
Expr * getBase() const
Definition Expr.h:3444
bool isArrow() const
Definition Expr.h:3551
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3562
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3708
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
bool isUnique() const
Definition Expr.h:1239
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3383
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2008
StringRef getIdentKindName() const
Definition Expr.h:2065
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2043
StringLiteral * getFunctionName()
Definition Expr.h:2052
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8562
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8476
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1449
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1190
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1556
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
Represents a struct/union/class.
Definition Decl.h:4343
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
bool isUnion() const
Definition Decl.h:3946
Exposes information about the current target.
Definition TargetInfo.h:227
virtual StringRef getABI() const
Get the ABI currently in use.
bool isVoidType() const
Definition TypeBase.h:9039
bool isBooleanType() const
Definition TypeBase.h:9176
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2289
bool isPackedVectorBoolType(const ASTContext &ctx) const
Definition Type.cpp:455
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9342
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8772
bool isFunctionPointerType() const
Definition TypeBase.h:8740
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2422
bool isConstantMatrixType() const
Definition TypeBase.h:8840
bool isPointerType() const
Definition TypeBase.h:8673
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9333
bool isReferenceType() const
Definition TypeBase.h:8697
bool isVariableArrayType() const
Definition TypeBase.h:8784
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:789
bool isExtVectorBoolType() const
Definition TypeBase.h:8820
bool isAnyComplexType() const
Definition TypeBase.h:8808
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9219
bool isAtomicType() const
Definition TypeBase.h:8865
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2855
bool isFunctionType() const
Definition TypeBase.h:8669
bool isVectorType() const
Definition TypeBase.h:8812
bool isSubscriptableVectorType() const
Definition TypeBase.h:8832
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9266
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2444
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
SourceLocation getExprLoc() const
Definition Expr.h:2371
Expr * getSubExpr() const
Definition Expr.h:2288
Opcode getOpcode() const
Definition Expr.h:2283
static bool isPrefix(Opcode Op)
isPrefix - Return true if this is a prefix operation, like –x.
Definition Expr.h:2322
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:924
TLSKind getTLSKind() const
Definition Decl.cpp:2147
bool hasInit() const
Definition Decl.cpp:2377
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2345
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1182
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:950
@ TLS_None
Not a TLS variable.
Definition Decl.h:944
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4021
Represents a GCC generic vector type.
Definition TypeBase.h:4230
Defines the clang::TargetInfo interface.
mlir::ptr::MemorySpaceAttrInterface toCIRAddressSpaceAttr(mlir::MLIRContext &ctx, clang::LangAS langAS)
Convert an AST LangAS to the appropriate CIR address space attribute interface.
OverflowBehavior
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
bool isEmptyFieldForLayout(const ASTContext &context, const FieldDecl *fd)
isEmptyFieldForLayout - Return true if the field is "empty", that is, either a zero-width bit-field o...
static AlignmentSource getFieldAlignmentSource(AlignmentSource source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, CUDAKernelCallExpr > cudaKernelCallExpr
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
bool isTargetAddressSpace(LangAS AS)
@ SC_Register
Definition Specifiers.h:258
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:343
@ SD_Static
Static storage duration.
Definition Specifiers.h:344
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:341
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:342
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:345
LangAS
Defines the address space values used by the address space qualifier of QualType.
U cast(CodeGen::Address addr)
Definition Address.h:327
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:178
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:181
static bool weakRefReference()
static bool objCLifetime()
static bool emitLifetimeMarkers()
static bool opLoadEmitScalarRangeCheck()
static bool addressSpace()
static bool opAllocaNonGC()
static bool opAllocaOpenMPThreadPrivate()
static bool preservedAccessIndexRegion()
static bool mergeAllConstants()
static bool opLoadStoreTbaa()
static bool opCallChain()
static bool opAllocaImpreciseLifetime()
static bool opAllocaStaticLocal()
static bool opAllocaTLS()
static bool emitCheckedInBoundsGEP()
static bool attributeNoBuiltin()
static bool setObjCGCLValueClass()
static bool cirgenABIInfo()
static bool opLoadStoreObjC()
static bool opCallArgEvaluationOrder()
static bool insertBuiltinUnpredictable()
static bool opCallMustTail()
static bool shouldReverseUnaryCondOnBoolExpr()
static bool tryEmitAsConstant()
static bool addressIsKnownNonNull()
static bool astVarDeclInterface()
static bool cgCapturedStmtInfo()
static bool opAllocaEscapeByReference()
static bool opLoadStoreNontemporal()
static bool opCallFnInfoOpts()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Record with information about how a bitfield should be accessed.
unsigned volatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned volatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
mlir::ptr::MemorySpaceAttrInterface getCIRAllocaAddressSpace() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:615