  CharUnits atomicAlign;

  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
      : cgf(cgf), loc(loc) {
    assert(!lvalue.isGlobalReg());
    ASTContext &ctx = cgf.getContext();
    if (lvalue.isSimple()) {
      atomicTy = lvalue.getType();
      if (auto *ty = atomicTy->getAs<AtomicType>())
        valueTy = ty->getValueType();
      else
        valueTy = atomicTy;
      evaluationKind = cgf.getEvaluationKind(valueTy);

      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
      uint64_t valueAlignInBits = valueTypeInfo.Align;
      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
      valueSizeInBits = valueTypeInfo.Width;
      atomicSizeInBits = atomicTypeInfo.Width;
      assert(valueSizeInBits <= atomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(atomicAlign);

      this->lvalue = lvalue;
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
    }
  }
  QualType getValueType() const { return valueTy; }
  CharUnits getAtomicAlignment() const { return atomicAlign; }

  mlir::Value getAtomicPointer() const {
    if (lvalue.isSimple())
      return lvalue.getPointer();
    // Non-simple atomic l-values are not handled yet.
    return nullptr;
  }

  Address getAtomicAddress() const {
    mlir::Type elemTy;
    if (lvalue.isSimple()) {
      elemTy = lvalue.getAddress().getElementType();
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
    }
    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
  }
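
  // Note that getAtomicAddress() covers the full (possibly padded) atomic
  // storage; projectValue() below narrows an l-value back down to the value
  // field when padding is present.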
  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }

  bool emitMemSetZeroIfNecessary() const;

  Address castToAtomicIntPointer(Address addr) const;

  Address convertToAtomicIntPointer(Address addr) const;

  void emitCopyIntoMemory(RValue rvalue) const;

  LValue projectValue() const {
    assert(lvalue.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding())
      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");

    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
  }

  Address createTempAlloca() const;

private:
  bool requiresMemSetZero(mlir::Type ty) const;
};
/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                           uint64_t expectedSize) {
  return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
}

bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  switch (getEvaluationKind()) {
  case cir::TEK_Scalar:
    return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
  case cir::TEK_Complex:
    return !isFullSizeType(cgf.cgm,
                           mlir::cast<cir::ComplexType>(ty).getElementType(),
                           atomicSizeInBits / 2);
  case cir::TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
  mlir::Type ty = addr.getElementType();
  uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
  if (sourceSizeInBits != atomicSizeInBits) {
    cgf.cgm.errorNYI(
        loc,
        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
  }

  return castToAtomicIntPointer(addr);
}
Address AtomicInfo::createTempAlloca() const {
  Address tempAlloca = cgf.createMemTemp(
      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
                                                                  : atomicTy,
      getAtomicAlignment(), loc, "atomic-temp");

  if (lvalue.isBitField()) {
    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
  }

  return tempAlloca;
}
Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
  // Don't bother with int casts if the integer size is the same.
  if (intTy && intTy.getWidth() == atomicSizeInBits)
    return addr;
  auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
  return addr.withElementType(cgf.getBuilder(), ty);
}
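
// Illustrative example: an _Atomic(float) object is accessed through a
// 32-bit unsigned integer pointer here, so the atomic operations themselves
// only ever see an integer of the atomic width.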
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(lvalue.isSimple());
  Address addr = lvalue.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  cgf.cgm.errorNYI(loc,
                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
  return false;
}
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(lvalue.isSimple());

  // An aggregate r-value already has the atomic type, so the caller is
  // responsible for any padding.
  if (rvalue.isAggregate()) {
    cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
    return;
  }

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue tempLValue = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
  } else {
    cgf.cgm.errorNYI("copying complex into atomic lvalue");
  }
}
static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                              Address dest, Address ptr, Address val1,
                              Address val2, uint64_t size,
                              cir::MemOrder successOrder,
                              cir::MemOrder failureOrder) {
  mlir::Location loc = cgf.getLoc(e->getSourceRange());
  CIRGenBuilderTy &builder = cgf.getBuilder();

  mlir::Value expected = builder.createLoad(loc, val1);
  mlir::Value desired = builder.createLoad(loc, val2);

  auto cmpxchg = cir::AtomicCmpXchg::create(
      builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
      expected, desired,
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
      builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));
  cmpxchg.setWeak(isWeak);

  // If the exchange failed, store the observed old value back into the
  // "expected" slot so the caller can see it.
  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
  cir::IfOp::create(builder, loc, failed, false,
                    [&](mlir::OpBuilder &, mlir::Location) {
                      auto ptrTy = mlir::cast<cir::PointerType>(
                          val1.getPointer().getType());
                      if (val1.getElementType() != ptrTy.getPointee())
                        val1 = val1.withPointer(builder.createPtrBitcast(
                            val1.getPointer(), val1.getElementType()));
                      builder.createStore(loc, cmpxchg.getOld(), val1);
                      builder.createYield(loc);
                    });

  // Record the success flag in the destination.
  cgf.emitStoreOfScalar(cmpxchg.getSuccess(), dest, /*isVolatile=*/false,
                        e->getType());
}
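
// The emitted CIR roughly mirrors the C11 atomic_compare_exchange contract:
// load `expected` and `desired`, issue the compare-exchange operation, and if
// it failed, write the observed old value back into the `expected` slot
// before recording the success flag in `dest`.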
static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
                                        bool isWeak, Address dest, Address ptr,
                                        Address val1, Address val2,
                                        Expr *failureOrderExpr, uint64_t size,
                                        cir::MemOrder successOrder) {
  Expr::EvalResult failureOrderEval;
  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();

    cir::MemOrder failureOrder;
    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
      failureOrder = cir::MemOrder::Relaxed;
    } else {
      switch ((cir::MemOrder)failureOrderInt) {
      case cir::MemOrder::Relaxed:
      // A failure ordering of release or acq_rel is not allowed; fall back
      // to relaxed.
      case cir::MemOrder::Release:
      case cir::MemOrder::AcquireRelease:
        failureOrder = cir::MemOrder::Relaxed;
        break;
      case cir::MemOrder::Consume:
      case cir::MemOrder::Acquire:
        failureOrder = cir::MemOrder::Acquire;
        break;
      case cir::MemOrder::SequentiallyConsistent:
        failureOrder = cir::MemOrder::SequentiallyConsistent;
        break;
      }
    }

    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                      successOrder, failureOrder);
    return;
  }

  cgf.cgm.errorNYI(e->getSourceRange(),
                   "emitAtomicCmpXchgFailureSet: non-constant failure order");
}
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order) {
  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
  if (scopeModel) {
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
    return;
  }

  llvm::StringRef opName;

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);

  switch (expr->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    bool isWeak = false;
    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext()))
      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
                                  failureOrderExpr, size, order);
    else
      cgf.cgm.errorNYI(expr->getSourceRange(),
                       "emitAtomicOp: non-constant isWeak");
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    cir::LoadOp load = builder.createLoad(loc, ptr, expr->isVolatile());
    load->setAttr("mem_order", orderAttr);
    builder.createStore(loc, load->getResult(0), dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store: {
    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
                        /*align=*/mlir::IntegerAttr{}, orderAttr);
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    opName = cir::AtomicXchg::getOperationName();
    break;
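
  // The exchange family only records an operation name here; operands and
  // attributes are assembled by the generic emission after the switch. The
  // cases that follow are not implemented yet.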
  case AtomicExpr::AO__opencl_atomic_init:

  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:

  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load:

  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:

  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:

  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:

  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:

  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:

  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:

  case AtomicExpr::AO__atomic_test_and_set:

  case AtomicExpr::AO__atomic_clear:
    // The operations above are not implemented for CIR yet; their shared
    // handling is elided in this excerpt.
    return;
  }
  assert(!opName.empty() && "expected operation name to build");

  mlir::Value loadVal1 = builder.createLoad(loc, val1);

  // Build the named atomic RMW operation from the pointer and the loaded
  // operand; it yields a value of the operand's type.
  llvm::SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
  llvm::SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
                                          atomicOperands, atomicResTys);

  rmwOp->setAttr("mem_order", orderAttr);
  if (expr->isVolatile())
    rmwOp->setAttr("is_volatile", builder.getUnitAttr());

  mlir::Value result = rmwOp->getResult(0);
  builder.createStore(loc, result, dest);
}
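
// For example, __atomic_exchange_n(&x, 42, __ATOMIC_SEQ_CST) reaches the
// generic path above with opName set to the exchange operation: the new
// value is loaded from its temporary (val1), the named atomic operation is
// built with the requested mem_order (and is_volatile for volatile
// expressions), and its result (the previous value) is stored to dest.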
static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
  if (!cir::isValidCIRAtomicOrderingCABI(order))
    return false;

  auto memOrder = static_cast<cir::MemOrder>(order);
  if (isStore)
    return memOrder != cir::MemOrder::Consume &&
           memOrder != cir::MemOrder::Acquire &&
           memOrder != cir::MemOrder::AcquireRelease;
  if (isLoad)
    return memOrder != cir::MemOrder::Release &&
           memOrder != cir::MemOrder::AcquireRelease;
  return true;
}
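
// Per C11, a plain atomic store may not use consume, acquire, or acq_rel,
// and a plain atomic load may not use release or acq_rel; for instance,
// __atomic_store_n(&x, 1, __ATOMIC_ACQUIRE) is rejected here. Any valid
// ordering is accepted for read-modify-write operations.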
RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
  QualType atomicTy = e->getPtr()->getType()->getPointeeType();
  QualType memTy = atomicTy;
  if (const auto *ty = atomicTy->getAs<AtomicType>())
    memTy = ty->getValueType();

  Expr *isWeakExpr = nullptr;
  Expr *orderFailExpr = nullptr;

  Address val1 = Address::invalid();
  Address val2 = Address::invalid();
  Address dest = Address::invalid();
  Address ptr = emitPointerWithAlignment(e->getPtr());

  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
    // Initialization of an _Atomic object is not itself an atomic operation.
    LValue lvalue = makeAddrLValue(ptr, atomicTy);
    emitAtomicInit(e->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  uint64_t size = getContext().getTypeSizeInChars(atomicTy).getQuantity();

  bool shouldCastToIntPtrTy = true;

  switch (e->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled above with emitAtomicInit");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
    break;

  case AtomicExpr::AO__atomic_load:
    dest = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    val1 = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    val1 = emitPointerWithAlignment(e->getVal1());
    dest = emitPointerWithAlignment(e->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    val1 = emitPointerWithAlignment(e->getVal1());
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      val2 = emitPointerWithAlignment(e->getVal2());
    else
      val2 = emitValToTemp(*this, e->getVal2());
    orderFailExpr = e->getOrderFail();
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      isWeakExpr = e->getWeak();
    break;

  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_store:
    val1 = emitValToTemp(*this, e->getVal1());
    break;
  }
  LValue atomicVal = makeAddrLValue(ptr, atomicTy);
  AtomicInfo atomics(*this, atomicVal, getLoc(e->getSourceRange()));

  if (shouldCastToIntPtrTy) {
    ptr = atomics.castToAtomicIntPointer(ptr);
    if (val1.isValid())
      val1 = atomics.convertToAtomicIntPointer(val1);
  }
  if (dest.isValid()) {
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  } else if (!e->getType()->isVoidType()) {
    dest = atomics.createTempAlloca();
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  }

  // Sizes that are not a power of two, or wider than 16 bytes, must go
  // through the atomic library calls.
  bool powerOf2Size = (size & (size - 1)) == 0;
  bool useLibCall = !powerOf2Size || (size > 16);
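
  // Example: a packed 6-byte atomic type has a non-power-of-two size and a
  // 32-byte one exceeds the 16-byte limit; both would take the library-call
  // path rather than inline atomic operations (that path is not shown in
  // this excerpt).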
  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__atomic_clear;
  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
                e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                e->getOp() == AtomicExpr::AO__hip_atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load_n ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
  // The memory order is usually a constant; validate it and emit the op.
  Expr::EvalResult orderConst;
  if (e->getOrder()->EvaluateAsInt(orderConst, getContext())) {
    uint64_t ord = orderConst.Val.getInt().getZExtValue();
    if (isMemOrderValid(ord, isStore, isLoad))
      emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                   size, static_cast<cir::MemOrder>(ord));
  }

  // Conversion of the result temporary back to an r-value (details elided in
  // this excerpt).
  if (e->getType()->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(dest, e->getType(), e->getExprLoc());
}
void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));

  switch (atomics.getEvaluationKind()) {
    // The scalar, complex, and aggregate initialization cases are elided in
    // this excerpt; each evaluates `init` and stores it into the atomic
    // storage.
  }
  llvm_unreachable("bad evaluation kind");
}