  CharUnits atomicAlign;
  bool useLibCall = true;

  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
      : cgf(cgf), loc(loc) {
    assert(!lvalue.isGlobalReg());
    ASTContext &ctx = cgf.getContext();
    if (lvalue.isSimple()) {
      atomicTy = lvalue.getType();
      if (auto *ty = atomicTy->getAs<AtomicType>())
        valueTy = ty->getValueType();
      evaluationKind = cgf.getEvaluationKind(valueTy);

      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
      uint64_t valueAlignInBits = valueTypeInfo.Align;
      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
      valueSizeInBits = valueTypeInfo.Width;
      atomicSizeInBits = atomicTypeInfo.Width;
      assert(valueSizeInBits <= atomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(atomicAlign);

      this->lvalue = lvalue;
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
    }

    useLibCall = !ctx.getTargetInfo().hasBuiltinAtomic(
        atomicSizeInBits, ctx.toBits(lvalue.getAlignment()));
  }
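  // Annotation (not part of the original source): the constructor caches the
  // atomic type, its underlying value type, and their sizes/alignments, then
  // asks the target whether a lock-free builtin atomic exists for this width
  // and alignment; if not, useLibCall stays true and the (not yet implemented)
  // library-call path would be required.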
  QualType getValueType() const { return valueTy; }
  CharUnits getAtomicAlignment() const { return atomicAlign; }

  mlir::Value getAtomicPointer() const {
    if (lvalue.isSimple())
      return lvalue.getPointer();
    return nullptr;
  }

  bool shouldUseLibCall() const { return useLibCall; }
  const LValue &getAtomicLValue() const { return lvalue; }

  Address getAtomicAddress() const {
    mlir::Type elemTy;
    if (lvalue.isSimple()) {
      elemTy = lvalue.getAddress().getElementType();
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
    }
    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
  }
  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }

  bool emitMemSetZeroIfNecessary() const;

  mlir::Value getScalarRValValueOrNull(RValue rvalue) const;

  Address castToAtomicIntPointer(Address addr) const;

  Address convertToAtomicIntPointer(Address addr) const;

  mlir::Value convertRValueToInt(RValue rvalue, bool cmpxchg = false) const;

  void emitCopyIntoMemory(RValue rvalue) const;

  LValue projectValue() const {
    assert(lvalue.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding())
      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
  }

  Address createTempAlloca() const;

  bool requiresMemSetZero(mlir::Type ty) const;
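  // Annotation (not part of the original source): the helpers above share one
  // idea: the in-memory atomic slot may be wider than the user-visible value
  // type, so operands are routed through pointers to an integer of the full
  // atomic width (castToAtomicIntPointer / convertToAtomicIntPointer /
  // convertRValueToInt) and results are extracted again via projectValue().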
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                           uint64_t expectedSize) {
  return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
}

bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
  // ...
  switch (getEvaluationKind()) {
  // ...
  case cir::TEK_Complex:
    return !isFullSizeType(cgf.cgm,
                           mlir::cast<cir::ComplexType>(ty).getElementType(),
                           atomicSizeInBits / 2);
  // ...
  }
  llvm_unreachable("bad evaluation kind");
}
Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
  // ...
  if (sourceSizeInBits != atomicSizeInBits) {
    cgf.cgm.errorNYI(
        loc,
        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
  }
  return castToAtomicIntPointer(addr);
}
Address AtomicInfo::createTempAlloca() const {
  Address tempAlloca = cgf.createMemTemp(
      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
                                                                  : atomicTy,
      getAtomicAlignment(), loc, "atomic-temp");
  if (lvalue.isBitField()) {
    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
  }
  return tempAlloca;
}
mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue rvalue) const {
  if (rvalue.isScalar() && (!hasPadding() || !lvalue.isSimple()))
    return rvalue.getValue();
  return nullptr;
}

Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
  // Don't bother with a cast if the element type is already an integer of the
  // atomic width.
  if (intTy && intTy.getWidth() == atomicSizeInBits)
    return addr;
  // ...
}
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(lvalue.isSimple());
  Address addr = lvalue.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;
  cgf.cgm.errorNYI(loc,
                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
  return false;
}
static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg) {
  if (cir::isAnyFloatingPointType(valueTy))
    // ...
}
mlir::Value AtomicInfo::convertRValueToInt(RValue rvalue, bool cmpxchg) const {
  if (mlir::Value value = getScalarRValValueOrNull(rvalue)) {
    // ...
    cgf.cgm.errorNYI(
        loc, "AtomicInfo::convertRValueToInt: cast scalar rvalue to int");
    // ...
  }
  cgf.cgm.errorNYI(
      loc, "AtomicInfo::convertRValueToInt: cast non-scalar rvalue to int");
  return nullptr;
}
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(lvalue.isSimple());
  // ...
    cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
  // ...
  emitMemSetZeroIfNecessary();
  // ...
  LValue tempLValue = projectValue();
  // ...
    cgf.cgm.errorNYI("copying complex into atomic lvalue");
  // ...
}
static void emitMemOrderDefaultCaseLabel(CIRGenBuilderTy &builder,
                                         mlir::Location loc) {
  mlir::ArrayAttr ordersAttr = builder.getArrayAttr({});
  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Default,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}
static void emitMemOrderCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc,
                                  mlir::Type orderType,
                                  llvm::ArrayRef<cir::MemOrder> orders) {
  llvm::SmallVector<mlir::Attribute, 2> orderAttrs;
  for (cir::MemOrder order : orders)
    orderAttrs.push_back(
        cir::IntAttr::get(orderType, static_cast<int>(order)));
  mlir::ArrayAttr ordersAttr = builder.getArrayAttr(orderAttrs);
  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Anyof,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}
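// Annotation (not part of the original source): these two helpers only emit
// the case labels (a default case, and an "anyof" case covering several
// memory-order constants) of the switch built below when the memory order is
// a runtime value; restoring the saved insertion point appears to leave the
// builder inside the newly created case region so the caller can emit its
// body there.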
static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                              Address dest, Address ptr, Address val1,
                              Address val2, uint64_t size,
                              cir::MemOrder successOrder,
                              cir::MemOrder failureOrder) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(e->getSourceRange());
  mlir::Value expected = builder.createLoad(loc, val1);
  mlir::Value desired = builder.createLoad(loc, val2);

  auto cmpxchg = cir::AtomicCmpXchgOp::create(
      /* ... pointer, expected, desired, success/failure orders ... */);
  cmpxchg.setWeak(isWeak);

  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
  cir::IfOp::create(builder, loc, failed, false,
                    [&](mlir::OpBuilder &, mlir::Location) {
                      auto ptrTy = mlir::cast<cir::PointerType>(
                          /* ... */);
                      // ...
                    });
}
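// Annotation (not part of the original source, illustrative): this follows the
// __atomic_compare_exchange contract: load "expected" and "desired", emit the
// AtomicCmpXchgOp, and when it fails, write the value actually observed in
// memory back into the "expected" slot. Roughly the shape generated for:
//   bool ok = __atomic_compare_exchange_n(&x, &expected, desired, /*weak=*/0,
//                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);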
static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
                                        bool isWeak, Address dest, Address ptr,
                                        Address val1, Address val2,
                                        Expr *failureOrderExpr, uint64_t size,
                                        cir::MemOrder successOrder) {
  Expr::EvalResult failureOrderEval;
  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();

    cir::MemOrder failureOrder;
    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt))
      failureOrder = cir::MemOrder::Relaxed;
    else
      switch ((cir::MemOrder)failureOrderInt) {
      case cir::MemOrder::Relaxed:
      case cir::MemOrder::Release:
      case cir::MemOrder::AcquireRelease:
        failureOrder = cir::MemOrder::Relaxed;
        break;
      case cir::MemOrder::Consume:
      case cir::MemOrder::Acquire:
        failureOrder = cir::MemOrder::Acquire;
        break;
      case cir::MemOrder::SequentiallyConsistent:
        failureOrder = cir::MemOrder::SequentiallyConsistent;
        break;
      }
    // ... (emit the cmpxchg with the resolved failure order and return)
  }

  cgf.cgm.errorNYI(e->getSourceRange(),
                   "emitAtomicCmpXchgFailureSet: non-constant failure order");
}
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order, cir::SyncScopeKind scope) {
  llvm::StringRef opName;

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
  auto scopeAttr = cir::SyncScopeKindAttr::get(builder.getContext(), scope);
  cir::AtomicFetchKindAttr fetchAttr;
  bool fetchFirst = true;

  switch (expr->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // ...
                                  failureOrderExpr, size, order);
    // ...
      cgf.cgm.errorNYI(expr->getSourceRange(),
                       "emitAtomicOp: non-constant isWeak");
    // ...
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    // ... (create the load from ptr)
    load->setAttr("mem_order", orderAttr);
    load->setAttr("sync_scope", scopeAttr);
    // ...
    builder.createStore(loc, load->getResult(0), dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
    // ...
    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
                        mlir::IntegerAttr{}, scopeAttr, orderAttr);
    return;
  }
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    opName = cir::AtomicXchgOp::getOperationName();
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Add);
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Sub);
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Min);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Max);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::And);
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Or);
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Xor);
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Nand);
    break;
  case AtomicExpr::AO__atomic_test_and_set: {
    auto op = cir::AtomicTestAndSetOp::create(
        /* ... */);
    // ...
  }

  case AtomicExpr::AO__atomic_clear: {
    cir::AtomicClearOp::create(
        /* ... */);
    // ...
  }

  case AtomicExpr::AO__opencl_atomic_init:

  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:

  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:

  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:

  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:

  case AtomicExpr::AO__scoped_atomic_add_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:

  case AtomicExpr::AO__scoped_atomic_sub_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:

  case AtomicExpr::AO__scoped_atomic_min_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:

  case AtomicExpr::AO__scoped_atomic_max_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:

  case AtomicExpr::AO__scoped_atomic_and_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:

  case AtomicExpr::AO__scoped_atomic_or_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:

  case AtomicExpr::AO__scoped_atomic_xor_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:

  case AtomicExpr::AO__scoped_atomic_nand_fetch:

  case AtomicExpr::AO__scoped_atomic_fetch_nand:

  case AtomicExpr::AO__scoped_atomic_fetch_uinc:
  case AtomicExpr::AO__scoped_atomic_fetch_udec:
  case AtomicExpr::AO__atomic_fetch_uinc:
  case AtomicExpr::AO__atomic_fetch_udec:
    // ...
  }
  assert(!opName.empty() && "expected operation name to build");

  mlir::Value loadVal1 = builder.createLoad(loc, val1);
  // ... (assemble the operand and result type lists for the op)
  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
                                          atomicOperands, atomicResTys);

  // ...
  rmwOp->setAttr("binop", fetchAttr);
  rmwOp->setAttr("mem_order", orderAttr);
  if (expr->isVolatile())
    rmwOp->setAttr("is_volatile", builder.getUnitAttr());
  if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
    rmwOp->setAttr("fetch_first", builder.getUnitAttr());

  mlir::Value result = rmwOp->getResult(0);
  // ... (store the result to dest)
}
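// Annotation (not part of the original source): every remaining
// read-modify-write builtin funnels into this generic tail: the op is created
// by name (AtomicXchgOp or AtomicFetchOp), "binop" carries the
// arithmetic/logical kind, and "fetch_first" appears to mark the
// __atomic_fetch_* forms (old value returned) as opposed to the *_fetch forms
// (new value returned).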
static cir::SyncScopeKind convertSyncScopeToCIR(CIRGenFunction &cgf,
                                                SourceRange range,
                                                clang::SyncScope scope) {
  // ...
    cgf.cgm.errorNYI(range, "convertSyncScopeToCIR: unhandled sync scope");
    return cir::SyncScopeKind::System;
  // ...
    return cir::SyncScopeKind::SingleThread;
  // ...
  return cir::SyncScopeKind::System;
}
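// Annotation (not part of the original source): only two CIR sync scopes are
// modelled here, SingleThread and System; any other clang::SyncScope coming
// from the source is diagnosed with errorNYI and conservatively lowered to the
// System scope.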
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order,
                         const std::optional<Expr::EvalResult> &scopeConst,
                         mlir::Value scopeValue) {
  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
  if (!scopeModel) {
    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
                 size, order, cir::SyncScopeKind::System);
    return;
  }

  if (scopeConst.has_value()) {
    cir::SyncScopeKind mappedScope = convertSyncScopeToCIR(
        cgf, expr->getScope()->getSourceRange(),
        scopeModel->map(scopeConst->Val.getInt().getZExtValue()));
    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
                 size, order, mappedScope);
    return;
  }

  cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: dynamic sync scope");
}
static std::optional<cir::MemOrder>
getEffectiveAtomicMemOrder(cir::MemOrder oriOrder, bool isStore, bool isLoad,
                           bool isFence) {
  if (isStore) {
    if (oriOrder == cir::MemOrder::Consume ||
        oriOrder == cir::MemOrder::Acquire ||
        oriOrder == cir::MemOrder::AcquireRelease)
      return std::nullopt;
  } else if (isLoad) {
    if (oriOrder == cir::MemOrder::Release ||
        oriOrder == cir::MemOrder::AcquireRelease)
      return std::nullopt;
  } else if (isFence) {
    if (oriOrder == cir::MemOrder::Relaxed)
      return std::nullopt;
  }

  // Consume is not emitted as such; treat it as acquire.
  if (oriOrder == cir::MemOrder::Consume)
    return cir::MemOrder::Acquire;

  return oriOrder;
}
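// Annotation (not part of the original source): this helper appears to
// canonicalize a requested order for a given operation kind: orders that make
// no sense for the operation (acquire-flavored orders on a pure store,
// release-flavored orders on a pure load, relaxed on a fence) produce no case
// at all, and consume is strengthened to acquire before emission.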
static void emitAtomicExprWithDynamicMemOrder(
    CIRGenFunction &cgf, mlir::Value order, bool isStore, bool isLoad,
    bool isFence, llvm::function_ref<void(cir::MemOrder)> emitAtomicOpFn) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  // ...
  cir::SwitchOp::create(
      builder, order.getLoc(), order,
      [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
        mlir::Block *switchBlock = builder.getBlock();

        auto emitMemOrderCase = [&](llvm::ArrayRef<cir::MemOrder> caseOrders) {
          // All orders grouped into one case must lower to the same effective
          // order.
          for (int i = 1, e = caseOrders.size(); i < e; i++)
            assert((getEffectiveAtomicMemOrder(caseOrders[i - 1], isStore,
                                               isLoad, isFence) ==
                    getEffectiveAtomicMemOrder(caseOrders[i], isStore, isLoad,
                                               isFence)) &&
                   "Effective memory order must be same!");

          if (caseOrders.empty()) {
            emitMemOrderDefaultCaseLabel(builder, loc);
            // ...
            emitAtomicOpFn(cir::MemOrder::Relaxed);
          } else if (std::optional<cir::MemOrder> actualOrder =
                         getEffectiveAtomicMemOrder(caseOrders[0], isStore,
                                                    isLoad, isFence)) {
            // Relaxed is already covered by the default case.
            if (!isFence && actualOrder == cir::MemOrder::Relaxed)
              return;
            emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
            emitAtomicOpFn(actualOrder.value());
          }
          builder.createBreak(loc);
          builder.setInsertionPointToEnd(switchBlock);
        };

        emitMemOrderCase({});
        emitMemOrderCase({cir::MemOrder::Relaxed});
        emitMemOrderCase({cir::MemOrder::Consume, cir::MemOrder::Acquire});
        emitMemOrderCase({cir::MemOrder::Release});
        emitMemOrderCase({cir::MemOrder::AcquireRelease});
        emitMemOrderCase({cir::MemOrder::SequentiallyConsistent});
        // ...
      });
}
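// Annotation (not part of the original source, illustrative): when the memory
// order is only known at run time, for example
//   int load_with(int *p, int order) { return __atomic_load_n(p, order); }
// the helper above emits one switch over the order operand, with a default
// case that falls back to relaxed and one case per distinct effective order.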
void CIRGenFunction::emitAtomicExprWithMemOrder(
    const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
    llvm::function_ref<void(cir::MemOrder)> emitAtomicOpFn) {
  // Try to fold the memory order to a constant first.
  Expr::EvalResult eval;
  if (memOrder->EvaluateAsInt(eval, getContext())) {
    uint64_t constOrder = eval.Val.getInt().getZExtValue();
    // ...
    cir::MemOrder oriOrder = static_cast<cir::MemOrder>(constOrder);
    if (std::optional<cir::MemOrder> actualOrder =
            getEffectiveAtomicMemOrder(oriOrder, isStore, isLoad, isFence))
      emitAtomicOpFn(actualOrder.value());
    return;
  }
  // ...
}
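// Annotation (not part of the original source): a memory order that folds to a
// constant is validated, mapped to its effective order, and dispatched with a
// single call; non-constant orders take the switch-based path sketched in
// emitAtomicExprWithDynamicMemOrder above.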
RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
  // ...
    memTy = ty->getValueType();
  // ...
  Expr *isWeakExpr = nullptr;
  Expr *orderFailExpr = nullptr;
  // ...
  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
    // ...
  }
  // ...
  std::optional<Expr::EvalResult> scopeConst;
  // ...
      scopeConst.emplace(std::move(eval));
  // ...
  bool shouldCastToIntPtrTy = true;

  switch (e->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled above with emitAtomicInit");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    // ...
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    // ...
    break;

  case AtomicExpr::AO__atomic_exchange:
    // ...

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    // ...
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      // ...
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      // ...
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (memTy->isPointerType()) {
      cgm.errorNYI(e->getSourceRange(),
                   "atomic fetch-and-add and fetch-and-sub for pointers");
      // ...
    }
    // ...

  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:

  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:
    // ... (evaluate the value operand into a temporary)
    break;
  }
  // ...
  if (shouldCastToIntPtrTy) {
    ptr = atomics.castToAtomicIntPointer(ptr);
    if (val1.isValid())
      val1 = atomics.convertToAtomicIntPointer(val1);
    // ...
  }

  if (dest.isValid()) {
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
    // ...
        "test_and_set.bool");
  } else {
    // ...
    dest = atomics.createTempAlloca();
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  }

  bool powerOf2Size = (size & (size - 1)) == 0;
  bool useLibCall = !powerOf2Size || (size > 16);
  // ...

  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__atomic_clear;
  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
                e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                e->getOp() == AtomicExpr::AO__hip_atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load_n ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;

  auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
    emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                 size, memOrder, scopeConst, scope);
  };

  emitAtomicExprWithMemOrder(e->getOrder(), isStore, isLoad, /*isFence=*/false,
                             emitAtomicOpCallBackFn);
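  // Annotation (not part of the original source, illustrative): for a simple
  // builtin such as
  //   __atomic_fetch_add(&x, 1, __ATOMIC_SEQ_CST);
  // the flow above is: evaluate the pointer and value operands, cast them to
  // the padded atomic integer width when needed, resolve the memory order
  // (constant here, so no switch is emitted), and hand off to emitAtomicOp,
  // which builds the AtomicFetchOp with kind Add.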
  // ...
  auto order = cir::MemOrder::SequentiallyConsistent;
  // ...

void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest,
                                     cir::MemOrder order, bool isVolatile,
                                     bool isInit) {
  // ...
  mlir::Location loc = dest.getPointer().getLoc();
  // ...
  AtomicInfo atomics(*this, dest, loc);
  LValue lvalue = atomics.getAtomicLValue();

  if (lvalue.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }
    // ...
    if (atomics.shouldUseLibCall()) {
      cgm.errorNYI(loc, "emitAtomicStore: atomic store with library call");
      return;
    }

    mlir::Value valueToStore = atomics.convertRValueToInt(rvalue);

    Address addr = atomics.getAtomicAddress();
    if (mlir::Value value = atomics.getScalarRValValueOrNull(rvalue)) {
      // ...
      addr = atomics.castToAtomicIntPointer(addr);
      // ...
    }

    cir::StoreOp store = builder.createStore(loc, valueToStore, addr);
    // ...
    store.setMemOrder(order);
    if (isVolatile)
      store.setIsVolatile(true);
    // ...
    return;
  }

  cgm.errorNYI(loc, "emitAtomicStore: non-simple atomic lvalue");
}

void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
  // ...
  switch (atomics.getEvaluationKind()) {
  // ...
    bool zeroed = false;
    // ...
      zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    // ...
  }
  llvm_unreachable("bad evaluation kind");
}