// class AtomicInfo (excerpt):
CharUnits atomicAlign;
// ...
bool useLibCall = true;
AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
    : cgf(cgf), loc(loc) {
  assert(!lvalue.isGlobalReg());
  ASTContext &ctx = cgf.getContext();
  if (lvalue.isSimple()) {
    atomicTy = lvalue.getType();
    if (auto *ty = atomicTy->getAs<AtomicType>())
      valueTy = ty->getValueType();
    // ...
    evaluationKind = cgf.getEvaluationKind(valueTy);
    // ...
    TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
    TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
    // ...
    valueSizeInBits = valueTypeInfo.Width;
    atomicSizeInBits = atomicTypeInfo.Width;
    assert(valueSizeInBits <= atomicSizeInBits);
    assert(valueAlignInBits <= atomicAlignInBits);
    // ...
    if (lvalue.getAlignment().isZero())
      lvalue.setAlignment(atomicAlign);
    // ...
    this->lvalue = lvalue;
  } else {
    cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
  }
  // ...
  useLibCall = !ctx.getTargetInfo().hasBuiltinAtomic(
      atomicSizeInBits, ctx.toBits(lvalue.getAlignment()));
}
QualType getValueType() const { return valueTy; }
CharUnits getAtomicAlignment() const { return atomicAlign; }
// ...
mlir::Value getAtomicPointer() const {
  if (lvalue.isSimple())
    return lvalue.getPointer();
  assert(!cir::MissingFeatures::atomicInfoGetAtomicPointer());
  return nullptr;
}
// ...
bool shouldUseLibCall() const { return useLibCall; }
const LValue &getAtomicLValue() const { return lvalue; }
Address getAtomicAddress() const {
  mlir::Type elemTy;
  if (lvalue.isSimple()) {
    elemTy = lvalue.getAddress().getElementType();
  } else {
    // ...
    cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
  }
  return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
}
bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }

bool emitMemSetZeroIfNecessary() const;

mlir::Value getScalarRValValueOrNull(RValue rvalue) const;

Address castToAtomicIntPointer(Address addr) const;

Address convertToAtomicIntPointer(Address addr) const;

mlir::Value convertRValueToInt(RValue rvalue, bool cmpxchg = false) const;

RValue convertToValueOrAtomic(mlir::Value intVal, AggValueSlot resultSlot,
                              SourceLocation loc, bool asValue,
                              bool cmpxchg = false) const;

void emitCopyIntoMemory(RValue rvalue) const;
LValue projectValue() const {
  assert(lvalue.isSimple());
  Address addr = getAtomicAddress();
  if (hasPadding())
    cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
  // ...
  return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
}
RValue emitAtomicLoad(AggValueSlot resultSlot, SourceLocation loc,
                      bool asValue, cir::MemOrder order, bool isVolatile);

Address createTempAlloca() const;

bool requiresMemSetZero(mlir::Type ty) const;

mlir::Value emitAtomicLoadOp(cir::MemOrder order, bool isVolatile,
                             bool cmpxchg = false);
// ...
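// --------------------------------------------------------------------------
// Illustration (editor's addition, not part of CIRGenAtomic.cpp): AtomicInfo
// tracks the gap between the value type and the possibly padded atomic type.
// Assuming clang's usual layout, a 3-byte payload widens to a 4-byte atomic,
// so valueSizeInBits (24) != atomicSizeInBits (32), hasPadding() is true, and
// copies go through emitMemSetZeroIfNecessary so the padding bits are defined:
//
//   struct S { char buf[3]; };   // sizeof(S) == 3
//   _Atomic(struct S) gs;        // typically sizeof(gs) == 4
// --------------------------------------------------------------------------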
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                           uint64_t expectedSize) {
  // ...
}
bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
  // ...
  switch (getEvaluationKind()) {
  // ...
    return isFullSizeType(cgf.cgm,
                          mlir::cast<cir::ComplexType>(ty).getElementType(),
                          atomicSizeInBits / 2);
  // ...
  }
  llvm_unreachable("bad evaluation kind");
}
Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
  // ...
  if (sourceSizeInBits != atomicSizeInBits) {
    cgf.cgm.errorNYI(
        loc,
        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
    // ...
  }
  return castToAtomicIntPointer(addr);
}
RValue AtomicInfo::emitAtomicLoad(AggValueSlot resultSlot, SourceLocation loc,
                                  bool asValue, cir::MemOrder order,
                                  bool isVolatile) {
  // ...
  if (shouldUseLibCall()) {
    // ...
    cgf.cgm.errorNYI(loc, "emitAtomicLoad: emit atomic lib call");
    // ...
  }
  // ...
  mlir::Value loadOp = emitAtomicLoadOp(order, isVolatile);
  // ...
  return convertToValueOrAtomic(loadOp, resultSlot, loc, asValue);
}
Address AtomicInfo::createTempAlloca() const {
  Address tempAlloca = cgf.createMemTemp(
      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
                                                                  : atomicTy,
      getAtomicAlignment(), loc, "atomic-temp");
  // ...
  if (lvalue.isBitField()) {
    // ...
    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
  }
  return tempAlloca;
}
mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue rvalue) const {
  if (rvalue.isScalar() && (!hasPadding() || !lvalue.isSimple()))
    return rvalue.getValue();
  return nullptr;
}
Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
  // ...
  if (intTy && intTy.getWidth() == atomicSizeInBits)
    return addr;
  // ...
}
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(lvalue.isSimple());
  Address addr = lvalue.getAddress();
  // ...
  cgf.cgm.errorNYI(loc,
                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
  // ...
}
static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg) {
  // ...
  if (cir::isAnyFloatingPointType(valueTy))
    return /* ... */;
  // ...
}
mlir::Value AtomicInfo::emitAtomicLoadOp(cir::MemOrder order, bool isVolatile,
                                         bool cmpxchg) {
  Address addr = getAtomicAddress();
  if (shouldCastToInt(addr.getElementType(), cmpxchg))
    addr = castToAtomicIntPointer(addr);
  // ...
  op.setMemOrder(order);
  // ...
}
mlir::Value AtomicInfo::convertRValueToInt(RValue rvalue, bool cmpxchg) const {
  // ...
  if (mlir::Value value = getScalarRValValueOrNull(rvalue)) {
    // ...
    cgf.cgm.errorNYI(
        loc, "AtomicInfo::convertRValueToInt: cast scalar rvalue to int");
    // ...
  }
  // ...
  cgf.cgm.errorNYI(
      loc, "AtomicInfo::convertRValueToInt: cast non-scalar rvalue to int");
  // ...
}
RValue AtomicInfo::convertToValueOrAtomic(mlir::Value intVal,
                                          AggValueSlot resultSlot,
                                          SourceLocation loc, bool asValue,
                                          bool cmpxchg) const {
  // ...
  assert((mlir::isa<cir::IntType, cir::PointerType, cir::FPTypeInterface>(
             intVal.getType())) &&
         "Expected integer, pointer or floating point value when converting "
         "result.");
  bool isWholeValue =
      !lvalue.isBitField() || lvalue.getBitFieldInfo().size == valueSizeInBits;
  if (/* ... */ ((isWholeValue && !hasPadding()) || !asValue)) {
    mlir::Type valTy = asValue ? cgf.convertTypeForMem(valueTy)
                               : getAtomicAddress().getElementType();
    // ...
    assert((!mlir::isa<cir::IntType>(valTy) || intVal.getType() == valTy) &&
           "Different integer types.");
    // ...
    cgf.cgm.errorNYI("convertToValueOrAtomic: convert through bitcast");
    // ...
  }
  // ...
  cgf.cgm.errorNYI("convertToValueOrAtomic: convert through temp");
  // ...
}
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(lvalue.isSimple());
  // ...
  cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
  // ...
  emitMemSetZeroIfNecessary();
  // ...
  LValue tempLValue = projectValue();
  // ...
  cgf.cgm.errorNYI("copying complex into atomic lvalue");
  // ...
}
static void emitDefaultCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc) {
  mlir::ArrayAttr valuesAttr = builder.getArrayAttr({});
  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, valuesAttr, cir::CaseOpKind::Default,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}
static void emitMemOrderCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc,
                                  mlir::Type orderType,
                                  llvm::ArrayRef<cir::MemOrder> orders) {
  llvm::SmallVector<mlir::Attribute, 2> orderAttrs;
  for (cir::MemOrder order : orders)
    orderAttrs.push_back(cir::IntAttr::get(orderType, static_cast<int>(order)));
  mlir::ArrayAttr ordersAttr = builder.getArrayAttr(orderAttrs);
  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Anyof,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}
static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                              Address dest, Address ptr, Address val1,
                              Address val2, uint64_t size,
                              cir::MemOrder successOrder,
                              cir::MemOrder failureOrder,
                              cir::SyncScopeKind scope) {
  // ...
  mlir::Value expected = builder.createLoad(loc, val1);
  mlir::Value desired = builder.createLoad(loc, val2);
  // ...
  auto cmpxchg = cir::AtomicCmpXchgOp::create(
      /* ... */);
  // ...
  cmpxchg.setWeak(isWeak);
  // ...
  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
  cir::IfOp::create(builder, loc, failed, false,
                    [&](mlir::OpBuilder &, mlir::Location) {
                      auto ptrTy = mlir::cast<cir::PointerType>(
                          /* ... */);
                      // ...
                    });
  // ...
}
static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
                                        bool isWeak, Address dest, Address ptr,
                                        Address val1, Address val2,
                                        Expr *failureOrderExpr, uint64_t size,
                                        cir::MemOrder successOrder,
                                        cir::SyncScopeKind scope) {
  Expr::EvalResult failureOrderEval;
  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();
    // ...
    cir::MemOrder failureOrder;
    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
      failureOrder = cir::MemOrder::Relaxed;
    } else {
      switch ((cir::MemOrder)failureOrderInt) {
      case cir::MemOrder::Relaxed:
      // ...
      case cir::MemOrder::Release:
      case cir::MemOrder::AcquireRelease:
        failureOrder = cir::MemOrder::Relaxed;
        break;
      case cir::MemOrder::Consume:
      case cir::MemOrder::Acquire:
        failureOrder = cir::MemOrder::Acquire;
        break;
      case cir::MemOrder::SequentiallyConsistent:
        failureOrder = cir::MemOrder::SequentiallyConsistent;
        break;
      }
    }
    // ...
    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                      successOrder, failureOrder, scope);
    return;
  }
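  // Illustration (editor's addition, not from this file): with a compile-time
  // failure order the clamping above happens statically. C11 forbids release
  // and acq_rel as cmpxchg failure orders, so assuming this builtin call:
  //
  //   int expected = 0;
  //   __atomic_compare_exchange_n(&x, &expected, 1, /*weak=*/false,
  //                               __ATOMIC_ACQ_REL, __ATOMIC_RELEASE);
  //
  // the failure order is lowered as cir::MemOrder::Relaxed, while a constant
  // __ATOMIC_CONSUME failure order would be lowered as cir::MemOrder::Acquire.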
  mlir::Value failureOrderVal = cgf.emitScalarExpr(failureOrderExpr);
  // ...
  cir::SwitchOp::create(
      cgf.getBuilder(), atomicLoc, failureOrderVal,
      [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) {
        mlir::Block *switchBlock = cgf.getBuilder().getBlock();
        // ...
        emitDefaultCaseLabel(cgf.getBuilder(), atomicLoc);
        emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                          successOrder, cir::MemOrder::Relaxed, scope);
        cgf.getBuilder().createBreak(atomicLoc);
        cgf.getBuilder().setInsertionPointToEnd(switchBlock);
        // ...
        emitMemOrderCaseLabel(cgf.getBuilder(), loc, failureOrderVal.getType(),
                              {cir::MemOrder::Consume, cir::MemOrder::Acquire});
        emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                          successOrder, cir::MemOrder::Acquire, scope);
        // ...
        cgf.getBuilder().setInsertionPointToEnd(switchBlock);
        // ...
        emitMemOrderCaseLabel(cgf.getBuilder(), loc, failureOrderVal.getType(),
                              {cir::MemOrder::SequentiallyConsistent});
        emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                          successOrder, cir::MemOrder::SequentiallyConsistent,
                          scope);
        // ...
        cgf.getBuilder().setInsertionPointToEnd(switchBlock);
      });
}
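// Illustration (editor's addition, not from this file): when the failure
// order is only known at run time, the switch above dispatches over its
// value: default -> Relaxed, {Consume, Acquire} -> Acquire, and
// {SequentiallyConsistent} -> SequentiallyConsistent, each case holding its
// own compare-exchange. A call that reaches this dynamic path:
//
//   bool cas(int *p, int *exp, int des, int failure_order) {
//     return __atomic_compare_exchange_n(p, exp, des, /*weak=*/false,
//                                        __ATOMIC_SEQ_CST, failure_order);
//   }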
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order, cir::SyncScopeKind scope) {
  // ...
  llvm::StringRef opName;
  // ...
  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
  auto scopeAttr = cir::SyncScopeKindAttr::get(builder.getContext(), scope);
  cir::AtomicFetchKindAttr fetchAttr;
  bool fetchFirst = true;
  switch (expr->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
                                val2, failureOrderExpr, size, order, scope);
    return;

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
                                val2, failureOrderExpr, size, order, scope);
    return;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
    bool isWeak = false;
    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext()))
      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
                                  failureOrderExpr, size, order, scope);
    else
      cgf.cgm.errorNYI(expr->getSourceRange(),
                       "emitAtomicOp: non-constant isWeak");
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    // ...
    load->setAttr("mem_order", orderAttr);
    load->setAttr("sync_scope", scopeAttr);
    // ...
    builder.createStore(loc, load->getResult(0), dest);
    return;
  }
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
    // ...
    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
                        /*align=*/mlir::IntegerAttr{}, scopeAttr, orderAttr);
    return;
  }
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:
    opName = cir::AtomicXchgOp::getOperationName();
    break;

  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Add);
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Sub);
    break;

  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Min);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Max);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::And);
    break;

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Or);
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Xor);
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Nand);
    break;
  case AtomicExpr::AO__atomic_test_and_set: {
    auto op = cir::AtomicTestAndSetOp::create(
        /* ... */);
    // ...
  }

  case AtomicExpr::AO__atomic_clear: {
    cir::AtomicClearOp::create(
        /* ... */);
    // ...
  }

  case AtomicExpr::AO__atomic_fetch_uinc:
  case AtomicExpr::AO__scoped_atomic_fetch_uinc:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::UIncWrap);
    break;

  case AtomicExpr::AO__atomic_fetch_udec:
  case AtomicExpr::AO__scoped_atomic_fetch_udec:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::UDecWrap);
    break;
  case AtomicExpr::AO__opencl_atomic_init:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
    // ... (handling of the OpenCL/HIP variants is elided here)
  }
  assert(!opName.empty() && "expected operation name to build");

  mlir::Value loadVal1 = builder.createLoad(loc, val1);
  // ...
  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
                                          atomicOperands, atomicResTys);
  // ...
  if (fetchAttr)
    rmwOp->setAttr("binop", fetchAttr);
  rmwOp->setAttr("mem_order", orderAttr);
  rmwOp->setAttr("sync_scope", scopeAttr);
  if (expr->isVolatile())
    rmwOp->setAttr("is_volatile", builder.getUnitAttr());
  if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
    rmwOp->setAttr("fetch_first", builder.getUnitAttr());
  // ...
  mlir::Value result = rmwOp->getResult(0);
  // ...
}
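// Illustration (editor's addition, not from this file): fetchFirst mirrors
// the two builtin families. __atomic_fetch_<op> returns the value observed
// before the update (the "fetch_first" attribute is set above), while
// __atomic_<op>_fetch returns the updated value:
//
//   int old = __atomic_fetch_add(&x, 1, __ATOMIC_RELAXED);  // value before +1
//   int now = __atomic_add_fetch(&x, 1, __ATOMIC_RELAXED);  // value after +1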
static cir::SyncScopeKind convertSyncScopeToCIR(CIRGenFunction &cgf,
                                                SourceRange range,
                                                clang::SyncScope scope) {
  switch (scope) {
  // ...
    return cir::SyncScopeKind::SingleThread;
  // ...
    return cir::SyncScopeKind::System;
  // ...
    return cir::SyncScopeKind::Device;
  // ...
    return cir::SyncScopeKind::Workgroup;
  // ...
    return cir::SyncScopeKind::Wavefront;
  // ...
    return cir::SyncScopeKind::Cluster;
  // ...
    return cir::SyncScopeKind::HIPSingleThread;
  // ...
    return cir::SyncScopeKind::HIPSystem;
  // ...
    return cir::SyncScopeKind::HIPAgent;
  // ...
    return cir::SyncScopeKind::HIPWorkgroup;
  // ...
    return cir::SyncScopeKind::HIPWavefront;
  // ...
    return cir::SyncScopeKind::HIPCluster;
  // ...
    return cir::SyncScopeKind::OpenCLWorkGroup;
  // ...
    return cir::SyncScopeKind::OpenCLDevice;
  // ...
    return cir::SyncScopeKind::OpenCLAllSVMDevices;
  // ...
    return cir::SyncScopeKind::OpenCLSubGroup;
  }
  llvm_unreachable("unhandled sync scope");
}
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order,
                         const std::optional<Expr::EvalResult> &scopeConst,
                         mlir::Value scopeValue) {
  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
  if (!scopeModel) {
    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
                 size, order, cir::SyncScopeKind::System);
    return;
  }
  // ...
  if (scopeConst.has_value()) {
    cir::SyncScopeKind mappedScope = convertSyncScopeToCIR(
        cgf, expr->getScope()->getSourceRange(),
        scopeModel->map(scopeConst->Val.getInt().getZExtValue()));
    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
                 size, order, mappedScope);
    return;
  }
  // ...
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  CIRGenBuilderTy &builder = cgf.getBuilder();
  unsigned fallback = scopeModel->getFallBackValue();
  // ...
  cir::SwitchOp::create(
      builder, loc, scopeValue,
      [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
        mlir::Block *switchBlock = builder.getBlock();
        // ...
        cir::SyncScopeKind fallbackScope = convertSyncScopeToCIR(
            cgf, expr->getScope()->getSourceRange(), scopeModel->map(fallback));
        emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr,
                     failureOrderExpr, size, order, fallbackScope);
        // ...
        builder.setInsertionPointToEnd(switchBlock);
        // ...
        for (unsigned scope : allScopes) {
          if (scope == fallback)
            continue;
          cir::SyncScopeKind cirScope = convertSyncScopeToCIR(
              cgf, expr->getScope()->getSourceRange(), scopeModel->map(scope));
          // ...
          mlir::ArrayAttr casesAttr = builder.getArrayAttr(
              {cir::IntAttr::get(scopeValue.getType(), scope)});
          mlir::OpBuilder::InsertPoint insertPoint;
          cir::CaseOp::create(builder, loc, casesAttr, cir::CaseOpKind::Equal,
                              insertPoint);
          builder.restoreInsertionPoint(insertPoint);
          emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr,
                       failureOrderExpr, size, order, cirScope);
          // ...
          builder.setInsertionPointToEnd(switchBlock);
        }
      });
}
static std::optional<cir::MemOrder>
getEffectiveAtomicMemOrder(cir::MemOrder oriOrder, bool isStore, bool isLoad,
                           bool isFence) {
  // ...
  if (isStore) {
    if (oriOrder == cir::MemOrder::Consume ||
        oriOrder == cir::MemOrder::Acquire ||
        oriOrder == cir::MemOrder::AcquireRelease)
      return std::nullopt;
  } else if (isLoad) {
    if (oriOrder == cir::MemOrder::Release ||
        oriOrder == cir::MemOrder::AcquireRelease)
      return std::nullopt;
  } else if (isFence) {
    if (oriOrder == cir::MemOrder::Relaxed)
      return std::nullopt;
  }
  // ...
  if (oriOrder == cir::MemOrder::Consume)
    return cir::MemOrder::Acquire;
  return oriOrder;
}
static void emitAtomicExprWithDynamicMemOrder(
    CIRGenFunction &cgf, mlir::Value order, bool isStore, bool isLoad,
    bool isFence, llvm::function_ref<void(cir::MemOrder)> emitAtomicOpFn) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  // ...
  cir::SwitchOp::create(
      builder, order.getLoc(), order,
      [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
        mlir::Block *switchBlock = builder.getBlock();
        // ...
        auto emitMemOrderCase = [&](llvm::ArrayRef<cir::MemOrder> caseOrders) {
          // ...
          for (int i = 1, e = caseOrders.size(); i < e; i++)
            assert((getEffectiveAtomicMemOrder(caseOrders[i - 1], isStore,
                                               isLoad, isFence) ==
                    getEffectiveAtomicMemOrder(caseOrders[i], isStore, isLoad,
                                               isFence)) &&
                   "Effective memory order must be same!");
          // ...
          if (caseOrders.empty()) {
            emitDefaultCaseLabel(builder, loc);
            // ...
            emitAtomicOpFn(cir::MemOrder::Relaxed);
          } else if (std::optional<cir::MemOrder> actualOrder =
                         getEffectiveAtomicMemOrder(caseOrders[0], isStore,
                                                    isLoad, isFence)) {
            // ...
            if (!isFence && actualOrder == cir::MemOrder::Relaxed)
              return;
            // ...
            emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
            emitAtomicOpFn(actualOrder.value());
          }
          // ...
          builder.createBreak(loc);
          builder.setInsertionPointToEnd(switchBlock);
        };

        emitMemOrderCase({});
        emitMemOrderCase({cir::MemOrder::Relaxed});
        emitMemOrderCase({cir::MemOrder::Consume, cir::MemOrder::Acquire});
        emitMemOrderCase({cir::MemOrder::Release});
        emitMemOrderCase({cir::MemOrder::AcquireRelease});
        emitMemOrderCase({cir::MemOrder::SequentiallyConsistent});
      });
}
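// Illustration (editor's addition, not from this file): a run-time memory
// order lands in the switch above. Each case set maps to one effective order
// (Consume and Acquire share a case), and orders that are invalid for the
// operation fall through to the relaxed default case:
//
//   void store_with_order(int *p, int v, int order) {
//     __atomic_store_n(p, v, order);  // acquire/acq_rel/consume hit default
//   }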
void CIRGenFunction::emitAtomicExprWithMemOrder(
    const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
    llvm::function_ref<void(cir::MemOrder)> emitAtomicOpFn) {
  Expr::EvalResult eval;
  if (memOrder->EvaluateAsInt(eval, getContext())) {
    uint64_t constOrder = eval.Val.getInt().getZExtValue();
    // ...
    cir::MemOrder oriOrder = static_cast<cir::MemOrder>(constOrder);
    if (std::optional<cir::MemOrder> actualOrder =
            getEffectiveAtomicMemOrder(oriOrder, isStore, isLoad, isFence))
      emitAtomicOpFn(actualOrder.value());
    return;
  }
  // ...
  emitAtomicExprWithDynamicMemOrder(*this, emitScalarExpr(memOrder), isStore,
                                    isLoad, isFence, emitAtomicOpFn);
}
RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
  // ...
  QualType memTy = e->getPtr()->getType()->getPointeeType();
  if (const auto *ty = memTy->getAs<AtomicType>())
    memTy = ty->getValueType();
  // ...
  Expr *isWeakExpr = nullptr;
  Expr *orderFailExpr = nullptr;
  // ...
  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
    // ...
  }
  // ...
  std::optional<Expr::EvalResult> scopeConst;
  Expr::EvalResult eval;
  if (/* ... */)
    scopeConst.emplace(std::move(eval));
  // ...
  switch (e->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled above with emitAtomicInit");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    // ...
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    // ...
    break;

  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange:
    // ...
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
    // ...
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      val2 = emitPointerWithAlignment(e->getVal2());
    else
      val2 = emitValToTemp(*this, e->getVal2());
    orderFailExpr = e->getOrderFail();
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      isWeakExpr = e->getWeak();
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (memTy->isPointerType()) {
      // ...
      cgm.errorNYI(e->getSourceRange(),
                   "atomic fetch-and-add and fetch-and-sub for pointers");
      // ...
    }
    [[fallthrough]];

  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
  // ...
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__atomic_fetch_uinc:
  case AtomicExpr::AO__atomic_fetch_udec:
  case AtomicExpr::AO__scoped_atomic_fetch_uinc:
  case AtomicExpr::AO__scoped_atomic_fetch_udec:
    val1 = emitValToTemp(*this, e->getVal1());
    break;
  }
  // ...
  bool shouldCastToIntPtrTy =
      // ...
  // ...
  if (shouldCastToIntPtrTy) {
    ptr = atomics.castToAtomicIntPointer(ptr);
    // ...
    val1 = atomics.convertToAtomicIntPointer(val1);
    // ...
  }
  if (/* ... */) {
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
    // ... (alloca named "test_and_set.bool")
  } else {
    dest = atomics.createTempAlloca();
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  }
  // ...
  bool powerOf2Size = (size & (size - 1)) == 0;
  bool useLibCall = !powerOf2Size || (size > 16);
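  // Worked example (editor's note): (size & (size - 1)) == 0 is the standard
  // power-of-two test. size 8  -> power of two and <= 16 -> inlined atomic op;
  // size 12 -> not a power of two           -> runtime library call;
  // size 32 -> power of two but > 16 bytes  -> runtime library call.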
  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__atomic_clear;
  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
                e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                e->getOp() == AtomicExpr::AO__hip_atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load_n ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
  // ...
  auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
    emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                 size, memOrder, scopeConst, scope);
  };
  // ...
  emitAtomicExprWithMemOrder(e->getOrder(), isStore, isLoad, /*isFence=*/false,
                             emitAtomicOpCallBackFn);
  // ...
RValue CIRGenFunction::emitAtomicLoad(LValue lvalue, SourceLocation loc,
                                      AggValueSlot slot) {
  // ...
  return emitAtomicLoad(lvalue, loc, cir::MemOrder::SequentiallyConsistent,
                        lvalue.isVolatileQualified(), slot);
}
RValue CIRGenFunction::emitAtomicLoad(LValue lvalue, SourceLocation loc,
                                      cir::MemOrder order, bool isVolatile,
                                      AggValueSlot slot) {
  // ...
  AtomicInfo info(*this, lvalue, getLoc(loc));
  return info.emitAtomicLoad(slot, loc, /*asValue=*/true, order, isVolatile);
}
void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  auto order = cir::MemOrder::SequentiallyConsistent;
  // ...
  emitAtomicStore(rvalue, dest, order, dest.isVolatileQualified(), isInit);
}
void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest,
                                     cir::MemOrder order, bool isVolatile,
                                     bool isInit) {
  // ...
  mlir::Location loc = dest.getPointer().getLoc();
  // ...
  if (dest.isSimple()) {
    AtomicInfo atomics(*this, dest, loc);
    LValue lvalue = atomics.getAtomicLValue();
    // ...
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }
    // ...
    if (atomics.shouldUseLibCall()) {
      // ...
      cgm.errorNYI(loc, "emitAtomicStore: atomic store with library call");
      return;
    }
    // ...
    mlir::Value valueToStore = atomics.convertRValueToInt(rvalue);
    // ...
    Address addr = atomics.getAtomicAddress();
    if (mlir::Value value = atomics.getScalarRValValueOrNull(rvalue)) {
      // ...
      addr = atomics.castToAtomicIntPointer(addr);
      // ...
    }
    // ...
    cir::StoreOp store = builder.createStore(loc, valueToStore, addr);
    // ...
    store.setMemOrder(order);
    // ...
    if (isVolatile)
      store.setIsVolatile(true);
    // ...
    return;
  }
  // ...
  cgm.errorNYI(loc, "emitAtomicStore: non-simple atomic lvalue");
}
void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
  // ...
  switch (atomics.getEvaluationKind()) {
  // ...
    bool zeroed = false;
    // ...
    zeroed = atomics.emitMemSetZeroIfNecessary();
    dest = atomics.projectValue();
    // ...
  }
  llvm_unreachable("bad evaluation kind");
}
static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg)
Return true if the given value type should be cast to an integer when lowering the atomic operation.
static Address emitValToTemp(CIRGenFunction &cgf, Expr *e)
static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak, Address dest, Address ptr, Address val1, Address val2, uint64_t size, cir::MemOrder successOrder, cir::MemOrder failureOrder, cir::SyncScopeKind scope)
static void emitMemOrderCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc, mlir::Type orderType, llvm::ArrayRef< cir::MemOrder > orders)
static cir::SyncScopeKind convertSyncScopeToCIR(CIRGenFunction &cgf, SourceRange range, clang::SyncScope scope)
static void emitAtomicExprWithDynamicMemOrder(CIRGenFunction &cgf, mlir::Value order, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOpFn)
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, Address ptr, Address val1, Address val2, Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size, cir::MemOrder order, cir::SyncScopeKind scope)
static void emitDefaultCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc)
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty, uint64_t expectedSize)
Does a store of the given IR type modify the full expected width?
static std::optional< cir::MemOrder > getEffectiveAtomicMemOrder(cir::MemOrder oriOrder, bool isStore, bool isLoad, bool isFence)
static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak, Address dest, Address ptr, Address val1, Address val2, Expr *failureOrderExpr, uint64_t size, cir::MemOrder successOrder, cir::SyncScopeKind scope)
cir::BreakOp createBreak(mlir::Location loc)
Create a break operation.
mlir::Value createPtrBitcast(mlir::Value src, mlir::Type newPointeeTy)
mlir::Value createNot(mlir::Location loc, mlir::Value value)
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
cir::BoolType getBoolTy()
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
llvm::TypeSize getTypeStoreSize(mlir::Type ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
const TargetInfo & getTargetInfo() const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
static std::unique_ptr< AtomicScopeModel > getScopeModel(AtomicOp Op)
Get atomic scope model for the atomic op code.
Expr * getOrderFail() const
Address withPointer(mlir::Value newPtr) const
Return address with different pointer, but same element type and alignment.
mlir::Value getPointer() const
mlir::Type getElementType() const
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst, bool isVolatile=false, mlir::IntegerAttr align={}, cir::SyncScopeKindAttr scope={}, cir::MemOrderAttr order={})
cir::LoadOp createLoad(mlir::Location loc, Address addr, bool isVolatile=false)
cir::IntType getUIntNTy(int n)
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignment of the pointee.
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to an MLIR Location.
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
RValue emitAtomicExpr(AtomicExpr *e)
RValue emitAtomicLoad(LValue lvalue, SourceLocation loc, AggValueSlot slot=AggValueSlot::ignored())
mlir::Type convertTypeForMem(QualType t)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
mlir::MLIRContext & getMLIRContext()
void emitAtomicInit(Expr *init, LValue dest)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
clang::ASTContext & getContext() const
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment, and cast it to the default address space.
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const cir::CIRDataLayout getDataLayout() const
Address getAddress() const
clang::QualType getType() const
mlir::Value getPointer() const
bool isVolatileQualified() const
This trivial value class is used to represent the result of an expression that is evaluated.
Address getAggregateAddress() const
Return the value of the address of the aggregate.
static RValue get(mlir::Value v)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
mlir::Value getValue() const
Return the value of this scalar value.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power-of-two 8-bit bytes.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
This represents one expression.
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer, using any crazy technique that we want to.
bool EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsBooleanCondition - Return true if this is a constant which we can fold and convert to a boolean condition.
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic expression.
A (possibly-)qualified type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Encodes a location in the source.
A trivial tuple used to represent a source range.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpreted by SourceManager.
virtual bool hasBuiltinAtomic(uint64_t AtomicSizeInBits, uint64_t AlignmentInBits) const
Returns true if the given target supports lock-free atomic operations at the specified width and alig...
bool isPointerType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isAtomicType() const
const T * getAs() const
Member-template getAs<specific type>.
bool isValidCIRAtomicOrderingCABI(Int value)
SyncScope
Defines sync scope values used internally by clang.
static bool atomicInfoGetAtomicPointer()
static bool aggValueSlotGC()
static bool opLoadStoreAtomic()
static bool opLoadStoreTbaa()
static bool atomicUseLibCall()
static bool atomicOpenMP()
static bool atomicMicrosoftVolatile()
static bool atomicSyncScopeID()
static bool atomicInfoGetAtomicAddress()
EvalResult is a struct with detailed info about an evaluated expression.
APValue Val
Val - This is the value the expression can be folded to.