namespace {
class AtomicInfo {
  CIRGenFunction &cgf;
  QualType atomicTy;
  QualType valueTy;
  uint64_t atomicSizeInBits = 0;
  uint64_t valueSizeInBits = 0;
  CharUnits atomicAlign;
  // ... (remaining members elided in this excerpt)
  bool useLibCall = true;
  LValue lvalue;
  mlir::Location loc;

public:
  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
      : cgf(cgf), loc(loc) {
    assert(!lvalue.isGlobalReg());
    ASTContext &ctx = cgf.getContext();
    if (lvalue.isSimple()) {
      atomicTy = lvalue.getType();
      if (auto *ty = atomicTy->getAs<AtomicType>())
        valueTy = ty->getValueType();
      else
        valueTy = atomicTy;
      evaluationKind = cgf.getEvaluationKind(valueTy);

      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
      uint64_t valueAlignInBits = valueTypeInfo.Align;
      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
      valueSizeInBits = valueTypeInfo.Width;
      atomicSizeInBits = atomicTypeInfo.Width;
      assert(valueSizeInBits <= atomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(atomicAlign);

      this->lvalue = lvalue;
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
    }
    useLibCall = !ctx.getTargetInfo().hasBuiltinAtomic(
        atomicSizeInBits, ctx.toBits(lvalue.getAlignment()));
  }
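  // Illustrative note (not in the original source): the tail of the
  // constructor decides between inline atomics and libcalls. For
  //   _Atomic(int) ai;            // 32 bits, naturally aligned
  // hasBuiltinAtomic() holds on typical targets and useLibCall becomes
  // false, while an over-sized or under-aligned payload is routed to the
  // __atomic_* runtime library instead.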
  QualType getValueType() const { return valueTy; }
  CharUnits getAtomicAlignment() const { return atomicAlign; }
  mlir::Value getAtomicPointer() const {
    if (lvalue.isSimple())
      return lvalue.getPointer();
    assert(!cir::MissingFeatures::atomicInfoGetAtomicPointer());
    return nullptr;
  }
  bool shouldUseLibCall() const { return useLibCall; }
  const LValue &getAtomicLValue() const { return lvalue; }
  Address getAtomicAddress() const {
    mlir::Type elemTy;
    if (lvalue.isSimple()) {
      elemTy = lvalue.getAddress().getElementType();
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
    }
    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
  }
  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }
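  // Illustrative note (not in the original source): padding arises when the
  // value type is narrower than its atomic container, e.g. a 3-byte struct
  // placed in a 4-byte _Atomic slot: valueSizeInBits == 24 but
  // atomicSizeInBits == 32, so hasPadding() is true and the extra bits must
  // be zeroed before full-width operations such as compare-exchange.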
  bool emitMemSetZeroIfNecessary() const;
  mlir::Value getScalarRValValueOrNull(RValue rvalue) const;
  Address castToAtomicIntPointer(Address addr) const;
  Address convertToAtomicIntPointer(Address addr) const;
  mlir::Value convertRValueToInt(RValue rvalue, bool cmpxchg = false) const;
  void emitCopyIntoMemory(RValue rvalue) const;
  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(lvalue.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding())
      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
  }

  /// Creates a temp alloca for intermediate operations on an atomic value.
  Address createTempAlloca() const;

private:
  bool requiresMemSetZero(mlir::Type ty) const;
};
} // namespace

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                           uint64_t expectedSize) {
  return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
}
bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the type
  // uses the full size.
  case cir::TEK_Scalar:
    return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
  case cir::TEK_Complex:
    return !isFullSizeType(cgf.cgm,
                           mlir::cast<cir::ComplexType>(ty).getElementType(),
                           atomicSizeInBits / 2);
  // Padding in structs has an undefined bit pattern.
  case cir::TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
  mlir::Type ty = addr.getElementType();
  uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
  if (sourceSizeInBits != atomicSizeInBits) {
    cgf.cgm.errorNYI(
        loc,
        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
  }
  return castToAtomicIntPointer(addr);
}
Address AtomicInfo::createTempAlloca() const {
  Address tempAlloca = cgf.createMemTemp(
      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
                                                                  : atomicTy,
      getAtomicAlignment(), loc, "atomic-temp");
  if (lvalue.isBitField())
    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
  return tempAlloca;
}
mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue rvalue) const {
  if (rvalue.isScalar() && (!hasPadding() || !lvalue.isSimple()))
    return rvalue.getValue();
  return nullptr;
}
Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
  // Don't bother with int casts if the integer size is the same.
  if (intTy && intTy.getWidth() == atomicSizeInBits)
    return addr;
  auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
  return addr.withElementType(cgf.getBuilder(), ty);
}
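// Illustrative note (not in the original source): castToAtomicIntPointer
// rewrites, e.g., a !cir.ptr<!cir.float> address as !cir.ptr<!u32i> so the
// atomic operation can work on an integer of the same width; an address
// whose element type is already a 32-bit integer is returned unchanged.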
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(lvalue.isSimple());
  Address addr = lvalue.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  cgf.cgm.errorNYI(loc,
                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
  return false;
}
/// Return true if a value of the given type should be cast to an integer of
/// the same width around the atomic memory operation; if \p cmpxchg is true,
/// the decision is being made for a compare-exchange.
static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg) {
  if (cir::isAnyFloatingPointType(valueTy))
    return mlir::isa<cir::FP80Type>(valueTy) || cmpxchg;
  return !mlir::isa<cir::IntType>(valueTy) &&
         !mlir::isa<cir::PointerType>(valueTy);
}
mlir::Value AtomicInfo::convertRValueToInt(RValue rvalue, bool cmpxchg) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (mlir::Value value = getScalarRValValueOrNull(rvalue)) {
    if (!shouldCastToInt(value.getType(), cmpxchg))
      return cgf.emitToMemory(value, valueTy);
    cgf.cgm.errorNYI(
        loc, "AtomicInfo::convertRValueToInt: cast scalar rvalue to int");
    return nullptr;
  }

  cgf.cgm.errorNYI(
      loc, "AtomicInfo::convertRValueToInt: cast non-scalar rvalue to int");
  return nullptr;
}
/// Copy an r-value into memory as part of storing to an atomic type. This
/// needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(lvalue.isSimple());

  // If we have an r-value, the rvalue should be of the atomic type, which
  // means that the caller is responsible for having zeroed any padding.
  // Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
    return;
  }

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue tempLValue = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
  } else {
    cgf.cgm.errorNYI("copying complex into atomic lvalue");
  }
}
static void emitMemOrderDefaultCaseLabel(CIRGenBuilderTy &builder,
                                         mlir::Location loc) {
  mlir::ArrayAttr ordersAttr = builder.getArrayAttr({});
  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Default,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}
static void emitMemOrderCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc,
                                  mlir::Type orderType,
                                  llvm::ArrayRef<cir::MemOrder> orders) {
  llvm::SmallVector<mlir::Attribute> orderAttrs;
  for (cir::MemOrder order : orders)
    orderAttrs.push_back(cir::IntAttr::get(orderType, static_cast<int>(order)));
  mlir::ArrayAttr ordersAttr = builder.getArrayAttr(orderAttrs);

  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Anyof,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}
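// Illustrative note (not in the original source): these helpers open the
// individual regions of a cir.switch over a runtime memory order; a dynamic
// __atomic_load produces roughly
//   cir.switch (%order : !s32i) {
//     cir.case (anyof, [1, 2]) { ... acquire load ...  cir.break }
//     cir.case (default)       { ... relaxed load ...  cir.break }
//     ...
//   }
// with one case region per representable ordering.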
static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                              Address dest, Address ptr, Address val1,
                              Address val2, uint64_t size,
                              cir::MemOrder successOrder,
                              cir::MemOrder failureOrder) {
  mlir::Location loc = cgf.getLoc(e->getSourceRange());
  CIRGenBuilderTy &builder = cgf.getBuilder();

  mlir::Value expected = builder.createLoad(loc, val1);
  mlir::Value desired = builder.createLoad(loc, val2);

  auto cmpxchg = cir::AtomicCmpXchgOp::create(
      /* ... result types, pointer, expected/desired operands, and order
         attributes elided in this excerpt ... */);
  cmpxchg.setWeak(isWeak);

  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
  cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
                    [&](mlir::OpBuilder &, mlir::Location) {
                      auto ptrTy = mlir::cast<cir::PointerType>(
                          val1.getPointer().getType());
                      // ... (write the observed old value back to val1,
                      //      elided in this excerpt)
                    });
  // ... (store the success flag to dest, elided in this excerpt)
}
static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
                                        bool isWeak, Address dest, Address ptr,
                                        Address val1, Address val2,
                                        Expr *failureOrderExpr, uint64_t size,
                                        cir::MemOrder successOrder) {
  Expr::EvalResult failureOrderEval;
  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();

    cir::MemOrder failureOrder;
    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
      failureOrder = cir::MemOrder::Relaxed;
    } else {
      switch ((cir::MemOrder)failureOrderInt) {
      case cir::MemOrder::Relaxed:
      // The failure order may not be release or acq_rel; fall back to
      // relaxed for those.
      case cir::MemOrder::Release:
      case cir::MemOrder::AcquireRelease:
        failureOrder = cir::MemOrder::Relaxed;
        break;
      case cir::MemOrder::Consume:
      case cir::MemOrder::Acquire:
        failureOrder = cir::MemOrder::Acquire;
        break;
      case cir::MemOrder::SequentiallyConsistent:
        failureOrder = cir::MemOrder::SequentiallyConsistent;
        break;
      }
    }
    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                      successOrder, failureOrder);
    return;
  }

  cgf.cgm.errorNYI(e->getSourceRange(),
                   "emitAtomicCmpXchgFailureSet: non-constant failure order");
}
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order) {
  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
  if (scopeModel) {
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
    return;
  }

  llvm::StringRef opName;

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
  cir::AtomicFetchKindAttr fetchAttr;
  bool fetchFirst = true;

  switch (expr->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    bool isWeak = false;
    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
                                  failureOrderExpr, size, order);
    } else {
      cgf.cgm.errorNYI(expr->getSourceRange(),
                       "emitAtomicOp: non-constant isWeak");
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    cir::LoadOp load = builder.createLoad(loc, ptr, expr->isVolatile());
    load->setAttr("mem_order", orderAttr);
    builder.createStore(loc, load->getResult(0), dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store: {
    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
                        /*align=*/mlir::IntegerAttr{}, orderAttr);
    return;
  }
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    opName = cir::AtomicXchgOp::getOperationName();
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Add);
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Sub);
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Min);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Max);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::And);
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Or);
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Xor);
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Nand);
    break;
  case AtomicExpr::AO__atomic_test_and_set: {
    auto op = cir::AtomicTestAndSetOp::create(
        /* ... operands elided in this excerpt ... */);
    // ... (store the op's result to dest, elided)
    return;
  }

  case AtomicExpr::AO__atomic_clear: {
    cir::AtomicClearOp::create(
        /* ... operands elided in this excerpt ... */);
    return;
  }
  case AtomicExpr::AO__opencl_atomic_init:

  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:

  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load:

  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:

  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:

  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:

  case AtomicExpr::AO__scoped_atomic_sub_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:

  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:

  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:

  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:

  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:

  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:

  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    // ... (NYI diagnostic for as-yet-unsupported operations elided)
    return;
  }

  assert(!opName.empty() && "expected operation name to build");
  mlir::Value loadVal1 = builder.createLoad(loc, val1);

  llvm::SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
  llvm::SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
                                          atomicOperands, atomicResTys);

  if (fetchAttr)
    rmwOp->setAttr("binop", fetchAttr);
  rmwOp->setAttr("mem_order", orderAttr);
  if (expr->isVolatile())
    rmwOp->setAttr("is_volatile", builder.getUnitAttr());
  if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
    rmwOp->setAttr("fetch_first", builder.getUnitAttr());

  mlir::Value result = rmwOp->getResult(0);
  builder.createStore(loc, result, dest);
}
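// Illustrative note (not in the original source): for
//   __atomic_fetch_add(&i, 1, __ATOMIC_SEQ_CST);
// this generic-builder path produces roughly
//   %old = cir.atomic.fetch(add, %ptr : !cir.ptr<!s32i>, %1 : !s32i,
//                           seq_cst) : !s32i
// where the `fetch_first` attribute marks that the *old* value is returned;
// __atomic_add_fetch omits it and yields the updated value instead.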
static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
  if (!cir::isValidCIRAtomicOrderingCABI(order))
    return false;

  auto memOrder = static_cast<cir::MemOrder>(order);
  if (isStore)
    return memOrder != cir::MemOrder::Consume &&
           memOrder != cir::MemOrder::Acquire &&
           memOrder != cir::MemOrder::AcquireRelease;
  if (isLoad)
    return memOrder != cir::MemOrder::Release &&
           memOrder != cir::MemOrder::AcquireRelease;
  return true;
}
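// Illustrative note (not in the original source): a compile-time constant
//   __atomic_store_n(&x, v, __ATOMIC_ACQUIRE)
// fails this check (acquire is not a valid store ordering, just as release
// and acq_rel are not valid load orderings), so the caller simply skips
// emitting the atomic op for an out-of-contract constant order.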
static void emitAtomicExprWithDynamicMemOrder(
    CIRGenFunction &cgf, mlir::Value order, AtomicExpr *e, Address dest,
    Address ptr, Address val1, Address val2, Expr *isWeakExpr,
    Expr *orderFailExpr, uint64_t size, bool isStore, bool isLoad) {
  CIRGenBuilderTy &builder = cgf.getBuilder();

  // The memory order is not known at compile time, so dispatch at run time
  // to one specialized copy of the operation per valid ordering.
  cir::SwitchOp::create(
      builder, order.getLoc(), order,
      [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
        mlir::Block *switchBlock = builder.getBlock();

        auto emitMemOrderCase = [&](llvm::ArrayRef<cir::MemOrder> caseOrders,
                                    cir::MemOrder actualOrder) {
          if (caseOrders.empty())
            emitMemOrderDefaultCaseLabel(builder, loc);
          else
            emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
          emitAtomicOp(cgf, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                       size, actualOrder);
          builder.createBreak(loc);
          builder.setInsertionPointToEnd(switchBlock);
        };

        // Default case: treat anything unrecognized as relaxed.
        emitMemOrderCase({}, cir::MemOrder::Relaxed);
        if (!isStore) {
          emitMemOrderCase({cir::MemOrder::Consume, cir::MemOrder::Acquire},
                           cir::MemOrder::Acquire);
        }
        if (!isLoad) {
          emitMemOrderCase({cir::MemOrder::Release}, cir::MemOrder::Release);
        }
        if (!isLoad && !isStore) {
          emitMemOrderCase({cir::MemOrder::AcquireRelease},
                           cir::MemOrder::AcquireRelease);
        }
        emitMemOrderCase({cir::MemOrder::SequentiallyConsistent},
                         cir::MemOrder::SequentiallyConsistent);
      });
}
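// Illustrative note (not in the original source): for
//   int f(_Atomic(int) *p, int ord) { return __c11_atomic_load(p, ord); }
// the ordering is only known at run time, so one specialized body is emitted
// per valid ordering and selected by the switch above; orderings invalid for
// the operation (e.g. release for a load) have no case and take the relaxed
// default.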
RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
  QualType atomicTy = e->getPtr()->getType()->getPointeeType();
  QualType memTy = atomicTy;
  if (const auto *ty = atomicTy->getAs<AtomicType>())
    memTy = ty->getValueType();

  Expr *isWeakExpr = nullptr;
  Expr *orderFailExpr = nullptr;

  Address val1 = Address::invalid();
  Address val2 = Address::invalid();
  Address dest = Address::invalid();
  Address ptr = emitPointerWithAlignment(e->getPtr());

  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
    LValue lvalue = makeAddrLValue(ptr, atomicTy);
    emitAtomicInit(e->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
  uint64_t size = typeInfo.Width.getQuantity();

  bool shouldCastToIntPtrTy = true;

  switch (e->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled above with emitAtomicInit");
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
    dest = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    val1 = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    val1 = emitPointerWithAlignment(e->getVal1());
    dest = emitPointerWithAlignment(e->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    val1 = emitPointerWithAlignment(e->getVal1());
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      val2 = emitPointerWithAlignment(e->getVal2());
    else
      val2 = emitValToTemp(*this, e->getVal2());
    orderFailExpr = e->getOrderFail();
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      isWeakExpr = e->getWeak();
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (memTy->isPointerType()) {
      cgm.errorNYI(e->getSourceRange(),
                   "atomic fetch-and-add and fetch-and-sub for pointers");
      return RValue::get(nullptr);
    }
    [[fallthrough]];

  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
    shouldCastToIntPtrTy = !memTy->isFloatingType();
    [[fallthrough]];

  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_store:
    val1 = emitValToTemp(*this, e->getVal1());
    break;
  }
  LValue atomicVal = makeAddrLValue(ptr, atomicTy);
  AtomicInfo atomics(*this, atomicVal, getLoc(e->getSourceRange()));

  if (shouldCastToIntPtrTy) {
    ptr = atomics.castToAtomicIntPointer(ptr);
    if (val1.isValid())
      val1 = atomics.convertToAtomicIntPointer(val1);
  }
  if (dest.isValid()) {
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
    dest = createMemTemp(getContext().BoolTy, getLoc(e->getSourceRange()),
                         "test_and_set.bool");
  } else {
    dest = atomics.createTempAlloca();
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  }
  bool powerOf2Size = (size & (size - 1)) == 0;
  bool useLibCall = !powerOf2Size || (size > 16);
  // ... (libcall fallback handling elided in this excerpt)
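  // Illustrative note (not in the original source): sizes of 1, 2, 4, 8, and
  // 16 bytes stay on the inline path, while a 12-byte (non-power-of-two) or
  // 32-byte (> 16) payload must go through the __atomic_* library routines.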
  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__atomic_clear;
  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
                e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                e->getOp() == AtomicExpr::AO__hip_atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load_n ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
  Expr::EvalResult orderConst;
  if (e->getOrder()->EvaluateAsInt(orderConst, getContext())) {
    uint64_t ord = orderConst.Val.getInt().getZExtValue();
    if (isMemOrderValid(ord, isStore, isLoad))
      emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                   size, static_cast<cir::MemOrder>(ord));
  } else {
    mlir::Value order = emitScalarExpr(e->getOrder());
    emitAtomicExprWithDynamicMemOrder(*this, order, e, dest, ptr, val1, val2,
                                      isWeakExpr, orderFailExpr, size, isStore,
                                      isLoad);
  }

  // ... (conversion of dest back to an r-value elided in this excerpt)
}

void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  auto order = cir::MemOrder::SequentiallyConsistent;
  emitAtomicStore(rvalue, dest, order, dest.isVolatileQualified(), isInit);
}
void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest,
                                     cir::MemOrder order, bool isVolatile,
                                     bool isInit) {
  mlir::Location loc = dest.getPointer().getLoc();

  // If this is an aggregate r-value, it should agree in type except maybe
  // for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest, loc);
  LValue lvalue = atomics.getAtomicLValue();

  if (lvalue.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibCall()) {
      cgm.errorNYI(loc, "emitAtomicStore: atomic store with library call");
      return;
    }

    mlir::Value valueToStore = atomics.convertRValueToInt(rvalue);

    Address addr = atomics.getAtomicAddress();
    if (mlir::Value value = atomics.getScalarRValValueOrNull(rvalue))
      if (shouldCastToInt(value.getType(), /*cmpxchg=*/false))
        addr = atomics.castToAtomicIntPointer(addr);

    cir::StoreOp store = builder.createStore(loc, valueToStore, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store.setMemOrder(order);

    if (isVolatile)
      store.setIsVolatile(true);

    return;
  }

  cgm.errorNYI(loc, "emitAtomicStore: non-simple atomic lvalue");
}
void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));

  switch (atomics.getEvaluationKind()) {
  case cir::TEK_Scalar:
    atomics.emitCopyIntoMemory(RValue::get(emitScalarExpr(init)));
    return;
  // ... (complex path elided in this excerpt)
  case cir::TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression of
    // atomic type.
    bool zeroed = false;
    if (!init->getType()->isAtomicType()) {
      zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }
    // ... (aggregate evaluation into the possibly-zeroed dest elided)
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}