  CharUnits atomicAlign;
  bool useLibCall = true;

  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
      : cgf(cgf), loc(loc) {
    assert(!lvalue.isGlobalReg());
    ASTContext &ctx = cgf.getContext();
    if (lvalue.isSimple()) {
      atomicTy = lvalue.getType();
      if (auto *ty = atomicTy->getAs<AtomicType>())
        valueTy = ty->getValueType();
      else
        valueTy = atomicTy;
      evaluationKind = cgf.getEvaluationKind(valueTy);

      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
      uint64_t valueAlignInBits = valueTypeInfo.Align;
      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
      valueSizeInBits = valueTypeInfo.Width;
      atomicSizeInBits = atomicTypeInfo.Width;
      assert(valueSizeInBits <= atomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(atomicAlign);

      this->lvalue = lvalue;
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
    }
    useLibCall = !ctx.getTargetInfo().hasBuiltinAtomic(
        atomicSizeInBits, ctx.toBits(lvalue.getAlignment()));
  }
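  // Whether an operation can be inlined is a property of both size and
  // alignment: when hasBuiltinAtomic() reports no lock-free support at this
  // width/alignment, useLibCall stays true and the operation has to be
  // lowered through the atomic library runtime instead.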
  QualType getValueType() const { return valueTy; }
  CharUnits getAtomicAlignment() const { return atomicAlign; }

  mlir::Value getAtomicPointer() const {
    if (lvalue.isSimple())
      return lvalue.getPointer();
    assert(!cir::MissingFeatures::atomicInfoGetAtomicPointer());
    return nullptr;
  }

  bool shouldUseLibCall() const { return useLibCall; }
  const LValue &getAtomicLValue() const { return lvalue; }

  Address getAtomicAddress() const {
    mlir::Type elemTy;
    if (lvalue.isSimple()) {
      elemTy = lvalue.getAddress().getElementType();
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
    }
    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
  }
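  /// Is the atomic representation wider than the underlying value type, i.e.
  /// does it carry padding bits? (For example, on x86-64 a long double holds
  /// 80 value bits inside a 128-bit atomic slot.)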
  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }

  bool emitMemSetZeroIfNecessary() const;

  mlir::Value getScalarRValValueOrNull(RValue rvalue) const;

  Address castToAtomicIntPointer(Address addr) const;

  Address convertToAtomicIntPointer(Address addr) const;

  mlir::Value convertRValueToInt(RValue rvalue, bool cmpxchg = false) const;

  void emitCopyIntoMemory(RValue rvalue) const;

  LValue projectValue() const {
    assert(lvalue.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding())
      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
  }

  Address createTempAlloca() const;

private:
  bool requiresMemSetZero(mlir::Type ty) const;
};
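// Does a store of the given IR type modify the full expected width?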
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                           uint64_t expectedSize) {
  return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
}
bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the type
  // uses the full size.
  case cir::TEK_Scalar:
    return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
  case cir::TEK_Complex:
    return !isFullSizeType(cgf.cgm,
                           mlir::cast<cir::ComplexType>(ty).getElementType(),
                           atomicSizeInBits / 2);
  // Padding in structs has an undefined bit pattern. User beware.
  case cir::TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
  mlir::Type ty = addr.getElementType();
  uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
  if (sourceSizeInBits != atomicSizeInBits) {
    cgf.cgm.errorNYI(
        loc,
        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
  }
  return castToAtomicIntPointer(addr);
}
Address AtomicInfo::createTempAlloca() const {
  Address tempAlloca = cgf.createMemTemp(
      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
                                                                  : atomicTy,
      getAtomicAlignment(), loc, "atomic-temp");

  // Cast to pointer to value type for bitfields.
  if (lvalue.isBitField()) {
    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
  }

  return tempAlloca;
}
mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue rvalue) const {
  if (rvalue.isScalar() && (!hasPadding() || !lvalue.isSimple()))
    return rvalue.getValue();
  return nullptr;
}
Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
  // Don't bother with int casts if the integer size is the same.
  if (intTy && intTy.getWidth() == atomicSizeInBits)
    return addr;
  auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
  return addr.withElementType(cgf.getBuilder(), ty);
}
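// The resulting pointer addresses the same memory, but with an unsigned
// integer element type exactly as wide as the atomic representation, which
// is the shape the inline atomic operations work on.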
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(lvalue.isSimple());
  Address addr = lvalue.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  cgf.cgm.errorNYI(loc,
                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
  return false;
}
/// Return true if \p valueTy should be cast to an integer around the atomic
/// memory operation. If \p cmpxchg is true, the cast is for a
/// compare-exchange, which cannot take floating-point operands.
static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg) {
  if (cir::isAnyFloatingPointType(valueTy))
    return isa<cir::FP80Type>(valueTy) || cmpxchg;
  return !isa<cir::IntType>(valueTy) && !isa<cir::PointerType>(valueTy);
}
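// x86's 80-bit long double is the notable floating-point case here: its
// in-memory padding bits force the integer path even for plain loads and
// stores, not just for compare-exchange.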
mlir::Value AtomicInfo::convertRValueToInt(RValue rvalue, bool cmpxchg) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (mlir::Value value = getScalarRValValueOrNull(rvalue)) {
    if (!shouldCastToInt(value.getType(), cmpxchg))
      return cgf.emitToMemory(value, valueTy);
    cgf.cgm.errorNYI(
        loc, "AtomicInfo::convertRValueToInt: cast scalar rvalue to int");
    return nullptr;
  }

  cgf.cgm.errorNYI(
      loc, "AtomicInfo::convertRValueToInt: cast non-scalar rvalue to int");
  return nullptr;
}
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(lvalue.isSimple());

  // If we have an r-value, the rvalue should be of the atomic type, which
  // means that the caller is responsible for having zeroed any padding.
  // Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue tempLValue = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
  } else {
    cgf.cgm.errorNYI("copying complex into atomic lvalue");
  }
}
static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                              Address dest, Address ptr, Address val1,
                              Address val2, uint64_t size,
                              cir::MemOrder successOrder,
                              cir::MemOrder failureOrder) {
  mlir::Location loc = cgf.getLoc(e->getSourceRange());
  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Value expected = builder.createLoad(loc, val1);
  mlir::Value desired = builder.createLoad(loc, val2);

  auto cmpxchg = cir::AtomicCmpXchgOp::create(
      builder, loc, /* ... result types, pointer, expected/desired values,
                       and success/failure order attributes ... */);
  cmpxchg.setWeak(isWeak);

  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
  cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
                    [&](mlir::OpBuilder &, mlir::Location) {
                      auto ptrTy = mlir::cast<cir::PointerType>(
                          val1.getPointer().getType());
                      if (val1.getElementType() != ptrTy.getPointee()) {
                        val1 = val1.withPointer(builder.createPtrBitcast(
                            val1.getPointer(), val1.getElementType()));
                      }
                      // On failure, store the observed value back into the
                      // caller's "expected" slot.
                      builder.createStore(loc, cmpxchg.getOld(), val1);
                      builder.createYield(loc);
                    });
  // ...
}
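// Illustrative mapping (simplified): for
//   bool ok = __c11_atomic_compare_exchange_strong(&a, &expected, desired,
//                                                  success, failure);
// this path emits a single cir.atomic.cmp_xchg producing {old, success},
// and only on failure writes `old` back to the caller's `expected` object.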
static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
                                        bool isWeak, Address dest, Address ptr,
                                        Address val1, Address val2,
                                        Expr *failureOrderExpr, uint64_t size,
                                        cir::MemOrder successOrder) {
  Expr::EvalResult failureOrderEval;
  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();

    cir::MemOrder failureOrder;
    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
      failureOrder = cir::MemOrder::Relaxed;
    } else {
      switch ((cir::MemOrder)failureOrderInt) {
      case cir::MemOrder::Relaxed:
      // 31.7.2.18: "The failure argument shall not be memory_order_release
      // nor memory_order_acq_rel". Fall back to monotonic.
      case cir::MemOrder::Release:
      case cir::MemOrder::AcquireRelease:
        failureOrder = cir::MemOrder::Relaxed;
        break;
      case cir::MemOrder::Consume:
      case cir::MemOrder::Acquire:
        failureOrder = cir::MemOrder::Acquire;
        break;
      case cir::MemOrder::SequentiallyConsistent:
        failureOrder = cir::MemOrder::SequentiallyConsistent;
        break;
      }
    }

    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                      successOrder, failureOrder);
    return;
  }

  cgf.cgm.errorNYI(e->getSourceRange(),
                   "emitAtomicCmpXchgFailureSet: non-constant failure order");
}
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order) {
  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
  if (scopeModel) {
    assert(!cir::MissingFeatures::atomicScope());
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
    return;
  }

  llvm::StringRef opName;
  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
  cir::AtomicFetchKindAttr fetchAttr;
  bool fetchFirst = true;

  switch (expr->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    bool isWeak = false;
    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
                                  failureOrderExpr, size, order);
    } else {
      cgf.cgm.errorNYI(expr->getSourceRange(),
                       "emitAtomicOp: non-constant isWeak");
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    cir::LoadOp load = builder.createLoad(loc, ptr, expr->isVolatile());
    load->setAttr("mem_order", orderAttr);
    builder.createStore(loc, load->getResult(0), dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store: {
    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
                        /*align=*/mlir::IntegerAttr{}, orderAttr);
    return;
  }
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    opName = cir::AtomicXchgOp::getOperationName();
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Add);
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Sub);
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Min);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Max);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::And);
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Or);
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Xor);
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Nand);
    break;
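  // For the <op>_fetch forms above, fetchFirst is cleared: the op's result
  // is the updated value rather than the original one. The fetch_<op> forms
  // keep fetchFirst set, which is encoded on the op via the fetch_first
  // attribute below.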
  case AtomicExpr::AO__atomic_test_and_set: {
    auto op = cir::AtomicTestAndSetOp::create(
        builder, loc, /* ... pointer, memory order, volatility ... */);
    builder.createStore(loc, op, dest);
    return;
  }

  case AtomicExpr::AO__atomic_clear: {
    cir::AtomicClearOp::create(builder, loc,
                               /* ... pointer and memory order ... */);
    return;
  }
  case AtomicExpr::AO__opencl_atomic_init:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
    return;
  }
  assert(!opName.empty() && "expected operation name to build");
  mlir::Value loadVal1 = builder.createLoad(loc, val1);

  llvm::SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
  llvm::SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
                                          atomicOperands, atomicResTys);

  if (fetchAttr)
    rmwOp->setAttr("binop", fetchAttr);
  rmwOp->setAttr("mem_order", orderAttr);
  if (expr->isVolatile())
    rmwOp->setAttr("is_volatile", builder.getUnitAttr());
  if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
    rmwOp->setAttr("fetch_first", builder.getUnitAttr());

  mlir::Value result = rmwOp->getResult(0);
  builder.createStore(loc, result, dest);
}
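// Everything that falls through the switch above is built generically:
// opName selects cir.atomic.xchg or cir.atomic.fetch, the binop attribute
// picks the arithmetic, and fetch_first marks the fetch_<op> forms that
// return the original value rather than the updated one.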
static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
  if (!cir::isValidCIRAtomicOrderingCABI(order))
    return false;

  auto memOrder = static_cast<cir::MemOrder>(order);
  if (isStore)
    return memOrder != cir::MemOrder::Consume &&
           memOrder != cir::MemOrder::Acquire &&
           memOrder != cir::MemOrder::AcquireRelease;
  if (isLoad)
    return memOrder != cir::MemOrder::Release &&
           memOrder != cir::MemOrder::AcquireRelease;
  return true;
}
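// For example, an atomic store requested with memory_order_acquire is
// rejected here, matching the C11/C++11 restrictions on which orderings a
// store or a load may use.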
RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
  QualType atomicTy = e->getPtr()->getType()->getPointeeType();
  QualType memTy = atomicTy;
  if (const auto *ty = atomicTy->getAs<AtomicType>())
    memTy = ty->getValueType();

  Expr *isWeakExpr = nullptr;
  Expr *orderFailExpr = nullptr;

  Address val1 = Address::invalid();
  Address val2 = Address::invalid();
  Address dest = Address::invalid();
  Address ptr = emitPointerWithAlignment(e->getPtr());

  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
    emitAtomicInit(e->getVal1(), makeAddrLValue(ptr, atomicTy));
    return RValue::get(nullptr);
  }

  TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
  uint64_t size = typeInfo.Width.getQuantity();

  Expr::EvalResult orderConst;
  mlir::Value order;
  if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
    order = emitScalarExpr(e->getOrder());

  bool shouldCastToIntPtrTy = true;
  switch (e->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled above with emitAtomicInit");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
    dest = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    val1 = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    val1 = emitPointerWithAlignment(e->getVal1());
    dest = emitPointerWithAlignment(e->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    val1 = emitPointerWithAlignment(e->getVal1());
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      val2 = emitPointerWithAlignment(e->getVal2());
    else
      val2 = emitValToTemp(*this, e->getVal2());
    orderFailExpr = e->getOrderFail();
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      isWeakExpr = e->getWeak();
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (memTy->isPointerType()) {
      cgm.errorNYI(e->getSourceRange(),
                   "atomic fetch-and-add and fetch-and-sub for pointers");
      return RValue::get(nullptr);
    }
    [[fallthrough]];

  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_store:
    val1 = emitValToTemp(*this, e->getVal1());
    break;
  }
  QualType resultTy = e->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2.
  // We need to make sure (via temporaries if necessary) that all incoming
  // values are compatible.
  LValue atomicValue = makeAddrLValue(ptr, atomicTy);
  AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));

  if (shouldCastToIntPtrTy) {
    ptr = atomics.castToAtomicIntPointer(ptr);
    if (val1.isValid())
      val1 = atomics.convertToAtomicIntPointer(val1);
  }
  if (dest.isValid()) {
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  } else if (e->isCmpXChg()) {
    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()),
                         "cmpxchg.bool");
  } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()),
                         "test_and_set.bool");
  } else if (!resultTy->isVoidType()) {
    dest = atomics.createTempAlloca();
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  }
  bool powerOf2Size = (size & (size - 1)) == 0;
  bool useLibCall = !powerOf2Size || (size > 16);
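  // Inline atomics are only emitted for power-of-two sizes of at most 16
  // bytes; anything else (e.g. a 24-byte struct) has to go through the
  // atomic library, a path that is not yet implemented here.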
  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__atomic_clear;
  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
                e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                e->getOp() == AtomicExpr::AO__hip_atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load_n ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
  uint64_t ord = orderConst.Val.getInt().getZExtValue();
  if (isMemOrderValid(ord, isStore, isLoad))
    emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                 size, static_cast<cir::MemOrder>(ord));
  // ...
}
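// E.g. __c11_atomic_load(&x, __ATOMIC_SEQ_CST) reaches emitAtomicOp with the
// constant 5 (the C ABI encoding of memory_order_seq_cst), which is
// range-checked by isMemOrderValid and then cast straight to cir::MemOrder.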
void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  auto order = cir::MemOrder::SequentiallyConsistent;
  bool isVolatile = dest.isVolatileQualified();
  emitAtomicStore(rvalue, dest, order, isVolatile, isInit);
}
void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest,
                                     cir::MemOrder order, bool isVolatile,
                                     bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  mlir::Location loc = dest.getPointer().getLoc();
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest, loc);
  LValue lvalue = atomics.getAtomicLValue();

  if (lvalue.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibCall()) {
      cgm.errorNYI(loc, "emitAtomicStore: atomic store with library call");
      return;
    }

    // Okay, we're doing this natively.
    mlir::Value valueToStore = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr = atomics.getAtomicAddress();
    if (mlir::Value value = atomics.getScalarRValValueOrNull(rvalue)) {
      if (shouldCastToInt(value.getType(), /*cmpxchg=*/false)) {
        addr = atomics.castToAtomicIntPointer(addr);
        // ...
      }
    }

    cir::StoreOp store = builder.createStore(loc, valueToStore, addr);

    store.setMemOrder(order);

    // Other decoration.
    if (isVolatile)
      store.setIsVolatile(true);

    // ...
    return;
  }

  cgm.errorNYI(loc, "emitAtomicStore: non-simple atomic lvalue");
}
void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));

  switch (atomics.getEvaluationKind()) {
  case cir::TEK_Scalar:
    atomics.emitCopyIntoMemory(RValue::get(emitScalarExpr(init)));
    return;

  case cir::TEK_Complex:
    atomics.emitCopyIntoMemory(RValue::get(emitComplexExpr(init)));
    return;

  case cir::TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression of
    // atomic type.
    bool zeroed = false;
    if (!init->getType()->isAtomicType()) {
      zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, AggValueSlot::IsNotDestructed, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
    emitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}